diff --git a/AUTHORS.md b/AUTHORS.md
index 54a1097b50f7a09062f8987e62db6b5f5e89e0b7..deafa641203ed9d9bd794fe92e4a91e3aaa03f63 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -25,6 +25,7 @@
 | kexinzhao | Ke-Xin Zhao |
 | kuke | Yi-Bing Liu |
 | lcy-seso | Ying Cao |
+| cjld | Dun Liang |
 | lipeng-unisound | Peng Li |
 | liuyuan | Yuan Liu |
 | livc | Zhao Li |
diff --git a/cmake/generic.cmake b/cmake/generic.cmake
index 111627a932afe2642312befb5d9c65e36e4bf1d0..cabef3f713621ed030bac9ff0c126ba7d9a90f3b 100644
--- a/cmake/generic.cmake
+++ b/cmake/generic.cmake
@@ -349,10 +349,17 @@ function(cc_test TARGET_NAME)
   set(oneValueArgs "")
   set(multiValueArgs SRCS DEPS ARGS)
   cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+  if(WIN32)
+    list(APPEND win32_deps shlwapi)
+    if("${cc_test_DEPS};" MATCHES "python;")
+      list(REMOVE_ITEM cc_test_DEPS python)
+      list(APPEND win32_deps ${PYTHON_LIBRARIES})
+    endif()
+  endif(WIN32)
   add_executable(${TARGET_NAME} ${cc_test_SRCS})
   target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
   if(WIN32)
-    target_link_libraries(${TARGET_NAME} shlwapi)
+    target_link_libraries(${TARGET_NAME} ${win32_deps})
   endif(WIN32)
   add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog)
   add_test(NAME ${TARGET_NAME}
@@ -679,7 +686,7 @@ function(py_test TARGET_NAME)
   set(multiValueArgs SRCS DEPS ARGS ENVS)
   cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
   add_test(NAME ${TARGET_NAME}
-           COMMAND env FLAGS_init_allocated_mem=true FLAGS_cudnn_deterministic=true
+           COMMAND ${CMAKE_COMMAND} -E env FLAGS_init_allocated_mem=true FLAGS_cudnn_deterministic=true
            FLAGS_cpu_deterministic=true
            PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_ENVS}
            ${PYTHON_EXECUTABLE} -u ${py_test_SRCS} ${py_test_ARGS}
diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index da8941c351571a8ff43974321490065079c2c0b4..541c4db1fa0914b657b3553ea20114f4bbe74464 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -103,6 +103,7 @@ paddle.fluid.layers.beam_search ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 's
 paddle.fluid.layers.row_conv ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.layers.multiplex ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.layer_norm ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None))
+paddle.fluid.layers.group_norm ArgSpec(args=['input', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW', None))
 paddle.fluid.layers.softmax_with_cross_entropy ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax'], varargs=None, keywords=None, defaults=(False, -100, False, False))
 paddle.fluid.layers.smooth_l1 ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.layers.one_hot ArgSpec(args=['input', 'depth'], varargs=None, keywords=None, defaults=None)
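A note on the API.spec entry above: a minimal sketch of how the new layer would be invoked from Python, matching that ArgSpec. The shapes and the executor boilerplate are illustrative assumptions, not part of this diff:

```python
import numpy as np
import paddle.fluid as fluid

# NCHW input with 8 channels, normalized in 4 groups of 2 channels each.
data = fluid.layers.data(name='data', shape=[8, 32, 32], dtype='float32')
out = fluid.layers.group_norm(input=data, groups=4, epsilon=1e-05,
                              data_layout='NCHW')

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
x = np.random.random((2, 8, 32, 32)).astype('float32')
y, = exe.run(fluid.default_main_program(),
             feed={'data': x}, fetch_list=[out])
```

diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc
index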
b8690156763e4037811245b8016982710445e6a2..a003995ae3f8e111881b4681554aa8eb17b60cc1 100644 --- a/paddle/fluid/framework/details/all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc @@ -23,7 +23,7 @@ namespace paddle { namespace framework { namespace details { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) AllReduceOpHandle::AllReduceOpHandle(ir::Node *node, const std::vector &local_scopes, const std::vector &places, @@ -74,7 +74,7 @@ void AllReduceOpHandle::RunImpl() { } if (platform::is_gpu_place(lod_tensors[0]->place())) { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) PADDLE_ENFORCE(nccl_ctxs_, "nccl_ctxs should not be nullptr."); int dtype = -1; size_t numel = 0; diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.h b/paddle/fluid/framework/details/all_reduce_op_handle.h index f6ef3a1367b91b6abf8ce74a91f73056efd0f84e..b449796fcaee73a6b84e0db2b5c76ff94bedcf08 100644 --- a/paddle/fluid/framework/details/all_reduce_op_handle.h +++ b/paddle/fluid/framework/details/all_reduce_op_handle.h @@ -20,7 +20,7 @@ #include "paddle/fluid/framework/details/op_handle_base.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) #include "paddle/fluid/platform/nccl_helper.h" #endif @@ -29,7 +29,7 @@ namespace framework { namespace details { struct AllReduceOpHandle : public OpHandleBase { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) AllReduceOpHandle(ir::Node *node, const std::vector &local_scopes, const std::vector &places, const platform::NCCLContextMap *ctxs); @@ -49,7 +49,7 @@ struct AllReduceOpHandle : public OpHandleBase { private: std::vector local_scopes_; std::vector places_; -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) const platform::NCCLContextMap *nccl_ctxs_; #endif }; diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc index 8e5e5427659387d63eac21a200c1a20da493e539..d98df3bbadd391d6df9b0a65a063e43e07a06fbc 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle.cc @@ -82,7 +82,7 @@ void BroadcastOpHandle::BroadcastOneVar( }); } } else { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) VarHandle *out_handle = nullptr; int root_id = boost::get(in_tensor.place()).device; std::vector> broadcast_calls; diff --git a/paddle/fluid/framework/details/broadcast_op_handle.h b/paddle/fluid/framework/details/broadcast_op_handle.h index 72180fac864256ddda076c57e50ab1083c113d32..0c75e05f861636565ae855ddd534c1082d40d237 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.h +++ b/paddle/fluid/framework/details/broadcast_op_handle.h @@ -24,7 +24,7 @@ #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/platform/device_context.h" -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) #include "paddle/fluid/platform/nccl_helper.h" #endif @@ -34,7 +34,7 @@ namespace details { struct BroadcastOpHandle : public OpHandleBase { public: -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) BroadcastOpHandle(ir::Node *node, const std::vector &local_scopes, const std::vector &places, const platform::NCCLContextMap *nccl_ctxs) @@ -68,7 +68,7 @@ struct BroadcastOpHandle : public OpHandleBase { std::vector 
local_scopes_; std::vector places_; -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) const platform::NCCLContextMap *nccl_ctxs_; #endif diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.h b/paddle/fluid/framework/details/broadcast_op_handle_test.h index 4305eb65733a7c871450949ce2c48cab013bac81..df3b3cc9ca012eabc428a7fb4c3af9be5b1c5bd5 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle_test.h +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.h @@ -42,7 +42,7 @@ struct TestBroadcastOpHandle { std::vector> nodes_; std::vector place_list_; bool use_gpu_; -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) std::unique_ptr nccl_ctxs_; #endif @@ -50,7 +50,7 @@ struct TestBroadcastOpHandle { for (size_t j = 0; j < ctxs_.size(); ++j) { ctxs_[j]->Wait(); } -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) if (nccl_ctxs_) { nccl_ctxs_->WaitAll(); } @@ -60,7 +60,7 @@ struct TestBroadcastOpHandle { void InitCtxOnGpu(bool use_gpu) { use_gpu_ = use_gpu; if (use_gpu_) { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) int count = p::GetCUDADeviceCount(); if (count <= 1) { LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " @@ -84,7 +84,7 @@ struct TestBroadcastOpHandle { place_list_.push_back(p); ctxs_.emplace_back(new p::CPUDeviceContext(p)); } -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) nccl_ctxs_.reset(nullptr); #endif } @@ -106,14 +106,14 @@ struct TestBroadcastOpHandle { nodes_.emplace_back( ir::CreateNodeForTest("node0", ir::Node::Type::kOperation)); if (use_gpu_) { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_, place_list_, nccl_ctxs_.get()); #else PADDLE_THROW("CUDA is not support."); #endif } else { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_, place_list_, nccl_ctxs_.get()); #else diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index 37202f869508c283e1b464942cadc0ebe3eef39c..70baced0ada33c23ba05cd2722e607edf847585a 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -96,7 +96,7 @@ std::unique_ptr BuildStrategy::Apply( const std::string &loss_var_name, const std::unordered_set ¶m_names, const std::vector &local_scopes, -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const { #else const bool use_cuda) const { @@ -118,7 +118,7 @@ std::unique_ptr BuildStrategy::Apply( pass->Erase("local_scopes"); pass->SetNotOwned>("local_scopes", &local_scopes); -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) platform::NCCLContextMap *nctx = use_cuda ? 
nccl_ctxs : nullptr; pass->Erase("nccl_ctxs"); pass->SetNotOwned("nccl_ctxs", nctx); diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h index fc2641dbd48274b43db0b1f156e3e1128f96772e..3236c35efdbf1175c3d06e531fc551f202ae17ad 100644 --- a/paddle/fluid/framework/details/build_strategy.h +++ b/paddle/fluid/framework/details/build_strategy.h @@ -23,7 +23,7 @@ #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/enforce.h" -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) #include "paddle/fluid/platform/nccl_helper.h" #endif @@ -98,7 +98,7 @@ struct BuildStrategy { const std::string &loss_var_name, const std::unordered_set ¶m_names, const std::vector &local_scopes, -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const; #else const bool use_cuda) const; diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc index 0b772f9b63e2cfb78175f5e0d7011db8e6a5ec20..cc562c7b102cea80e18cbd2c054c34415a7442c9 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.cc +++ b/paddle/fluid/framework/details/data_balance_op_handle.cc @@ -20,7 +20,7 @@ namespace paddle { namespace framework { namespace details { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) DataBalanceOpHandle::DataBalanceOpHandle( ir::Node *node, const std::vector &local_scopes, const std::vector &places, diff --git a/paddle/fluid/framework/details/data_balance_op_handle.h b/paddle/fluid/framework/details/data_balance_op_handle.h index 0462fb6ec713eb977f420a9cb485c0273e782496..2db18a1a7203f85aac6338576f2e68c7b37d7c69 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.h +++ b/paddle/fluid/framework/details/data_balance_op_handle.h @@ -19,7 +19,7 @@ #include "paddle/fluid/framework/details/op_handle_base.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) #include "paddle/fluid/platform/nccl_helper.h" #endif @@ -29,7 +29,7 @@ namespace details { struct DataBalanceOpHandle : public OpHandleBase { public: -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) DataBalanceOpHandle(ir::Node *node, const std::vector &local_scopes, const std::vector &places, const platform::NCCLContextMap *ctxs); diff --git a/paddle/fluid/framework/details/fused_broadcast_op_handle.h b/paddle/fluid/framework/details/fused_broadcast_op_handle.h index e37259526a5f6f57d51a0ca8bca96a18211a4790..e43d545c9c0d062a8814a2f5d27f9ca20290b9dd 100644 --- a/paddle/fluid/framework/details/fused_broadcast_op_handle.h +++ b/paddle/fluid/framework/details/fused_broadcast_op_handle.h @@ -25,7 +25,7 @@ #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/platform/device_context.h" -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) #include "paddle/fluid/platform/nccl_helper.h" #endif @@ -35,7 +35,7 @@ namespace details { struct FusedBroadcastOpHandle : public BroadcastOpHandle { public: -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) FusedBroadcastOpHandle(ir::Node *node, const std::vector local_scopes, const std::vector &places, diff --git a/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc 
b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc index 541993c74332cc483a8b854a6b8f227c7c9a19a9..be0d941c4f9c2fe8fbb1da8ec2c11868112fcf9b 100644 --- a/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc +++ b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc @@ -44,14 +44,14 @@ struct TestFusedBroadcastOpHandle : TestBroadcastOpHandle { nodes_.emplace_back( ir::CreateNodeForTest("fused_broadcast", ir::Node::Type::kOperation)); if (use_gpu_) { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) op_handle_ = new FusedBroadcastOpHandle( nodes_.back().get(), local_scopes_, place_list_, nccl_ctxs_.get()); #else PADDLE_THROW("CUDA is not supported."); #endif } else { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) op_handle_ = new FusedBroadcastOpHandle( nodes_.back().get(), local_scopes_, place_list_, nccl_ctxs_.get()); #else diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc index 8c98b781301e884d5d5c7d141f3d901d74d51285..26666212ae8c4dc5ce9a45b5c51bab1f9ff1a8ab 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc @@ -142,7 +142,7 @@ void MultiDevSSAGraphBuilder::Init() const { places_ = Get>(kPlaces); local_scopes_ = Get>(kLocalScopes); strategy_ = Get(kStrategy); -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) nccl_ctxs_ = &Get("nccl_ctxs"); #endif @@ -431,7 +431,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::ApplyImpl( } } bool use_gpu = false; -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) use_gpu = nccl_ctxs_ != nullptr; #endif @@ -478,7 +478,7 @@ bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const { void MultiDevSSAGraphBuilder::SetCommunicationContext( OpHandleBase *op_handle, const platform::Place &p) const { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) if (nccl_ctxs_ == nullptr) { op_handle->SetDeviceContext(p, platform::DeviceContextPool::Instance().Get(p)); @@ -492,7 +492,7 @@ void MultiDevSSAGraphBuilder::SetCommunicationContext( void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result, const std::string &p_name, size_t src_dev_id) const { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) auto *op_handle = new BroadcastOpHandle( result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation), local_scopes_, places_, nccl_ctxs_); @@ -522,7 +522,7 @@ void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result, void MultiDevSSAGraphBuilder::CreateFusedBroadcastOp( ir::Graph *result, const std::vector> &bcast_varnames) const { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) auto *op_handle = new FusedBroadcastOpHandle( result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation), local_scopes_, places_, nccl_ctxs_); @@ -568,7 +568,7 @@ void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result, void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result, const std::string &og) const { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) result->Get(kGraphOps).emplace_back(new AllReduceOpHandle( result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), local_scopes_, places_, nccl_ctxs_)); @@ -597,7 +597,7 @@ void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result, void 
MultiDevSSAGraphBuilder::InsertDataBalanceOp( ir::Graph *result, const std::vector &datas) const { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) result->Get(kGraphOps).emplace_back(new DataBalanceOpHandle( result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation), local_scopes_, places_, nccl_ctxs_)); @@ -694,7 +694,7 @@ void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result, VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result, const std::string &og, int dst_dev_id) const { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) result->Get(kGraphOps).emplace_back(new ReduceOpHandle( result->CreateEmptyNode("reduce", ir::Node::Type::kOperation), local_scopes_, places_, nccl_ctxs_)); diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.h b/paddle/fluid/framework/details/multi_devices_graph_pass.h index f3ec2d29415240b7012f458070223469d0947166..8e462aec7dc7ce45cad592b89de0b6edde8c9146 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.h +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.h @@ -40,7 +40,7 @@ class MultiDevSSAGraphBuilder : public ir::Pass { size_t device_id) const; void Init() const; -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) mutable platform::NCCLContextMap *nccl_ctxs_; #endif diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc index 4503123eac810917cabcf1e62cff98552ed2f742..c9f1107aeab5a21d46e828308cfcb2dde827cba6 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.cc +++ b/paddle/fluid/framework/details/reduce_op_handle.cc @@ -125,7 +125,7 @@ void ReduceOpHandle::RunImpl() { } }); } else if (paddle::platform::is_gpu_place(lod_tensors[0]->place())) { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) auto pre_in = pre_in_var->Get(); VariableVisitor::ShareDimsAndLoD(*pre_in_var, out_var); VariableVisitor::GetMutableTensor(out_var).mutable_data( diff --git a/paddle/fluid/framework/details/reduce_op_handle.h b/paddle/fluid/framework/details/reduce_op_handle.h index 999828ae457ba43541da06088ce7c25331fd05ec..846839029ca65be1bdeac2f6ea497db07a01b6cf 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.h +++ b/paddle/fluid/framework/details/reduce_op_handle.h @@ -23,7 +23,7 @@ #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/platform/device_context.h" -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) #include "paddle/fluid/platform/nccl_helper.h" #endif @@ -35,7 +35,7 @@ struct ReduceOpHandle : public OpHandleBase { std::vector local_scopes_; std::vector places_; -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) const platform::NCCLContextMap *nccl_ctxs_; ReduceOpHandle(ir::Node *node, const std::vector &local_scopes, const std::vector &places, diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc index 72299c0bfa916d3b92e1c5020ddd69dadad3701d..6cee4770e64354cf8a719b0e11b1816b345dd8bd 100644 --- a/paddle/fluid/framework/details/reduce_op_handle_test.cc +++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc @@ -35,7 +35,7 @@ struct TestReduceOpHandle { std::vector gpu_list_; std::vector> ctxs_; -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) std::unique_ptr nccl_ctxs_; #endif @@ -43,7 
+43,7 @@ struct TestReduceOpHandle { for (size_t j = 0; j < ctxs_.size(); ++j) { ctxs_[j]->Wait(); } -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) if (nccl_ctxs_) { nccl_ctxs_->WaitAll(); } @@ -53,7 +53,7 @@ struct TestReduceOpHandle { void InitCtxOnGpu(bool use_gpu) { use_gpu_ = use_gpu; if (use_gpu) { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) int count = p::GetCUDADeviceCount(); if (count <= 1) { LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " @@ -77,7 +77,7 @@ struct TestReduceOpHandle { gpu_list_.push_back(p); ctxs_.emplace_back(new p::CPUDeviceContext(p)); } -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) nccl_ctxs_.reset(nullptr); #endif } @@ -99,14 +99,14 @@ struct TestReduceOpHandle { nodes.emplace_back(new ir::Node("node")); if (use_gpu_) { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) op_handle_.reset(new ReduceOpHandle(nodes.back().get(), local_scopes_, gpu_list_, nccl_ctxs_.get())); #else PADDLE_THROW("CUDA is not support."); #endif } else { -#ifdef PADDLE_WITH_CUDA +#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) op_handle_.reset(new ReduceOpHandle(nodes.back().get(), local_scopes_, gpu_list_, nccl_ctxs_.get())); #else diff --git a/paddle/fluid/framework/ir/is_test_pass_tester.cc b/paddle/fluid/framework/ir/is_test_pass_tester.cc index cd2cb0c9f8a8ecc41a878cd3f711713cb5c23eb3..9696441a21661db89146c448742a992d1f7df022 100644 --- a/paddle/fluid/framework/ir/is_test_pass_tester.cc +++ b/paddle/fluid/framework/ir/is_test_pass_tester.cc @@ -15,7 +15,10 @@ #include "paddle/fluid/framework/ir/is_test_pass.h" #include - +#ifdef _WIN32 +#undef FALSE +#undef TRUE +#endif namespace paddle { namespace framework { namespace ir { diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc index 84a0c3374c66f85313828332099cb372e14c7c83..7710ed7b613a180a4e24237a2cf6b5f212f677a3 100644 --- a/paddle/fluid/inference/analysis/analyzer_tester.cc +++ b/paddle/fluid/inference/analysis/analyzer_tester.cc @@ -19,6 +19,7 @@ #include "paddle/fluid/inference/analysis/ut_helper.h" #include "paddle/fluid/inference/api/paddle_inference_api.h" #include "paddle/fluid/inference/api/paddle_inference_pass.h" +#include "paddle/fluid/platform/port.h" namespace paddle { namespace inference { @@ -75,7 +76,7 @@ void TestWord2vecPrediction(const std::string& model_path) { 0.000932706}; const size_t num_elements = outputs.front().data.length() / sizeof(float); // The outputs' buffers are in CPU memory. 
- for (size_t i = 0; i < std::min(5UL, num_elements); i++) { + for (size_t i = 0; i < std::min((size_t)5UL, num_elements); i++) { LOG(INFO) << "data: " << static_cast(outputs.front().data.data())[i]; PADDLE_ENFORCE(static_cast(outputs.front().data.data())[i], diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h index 6f9d663121004470d57c17b8154d725fdf2b9689..9a393a61c4b4586009a022884e88f3f5c6392ed3 100644 --- a/paddle/fluid/inference/api/helper.h +++ b/paddle/fluid/inference/api/helper.h @@ -15,10 +15,6 @@ #pragma once #include -#if !defined(_WIN32) -#include -#else -#endif #include #include // NOLINT @@ -28,6 +24,7 @@ #include #include #include "paddle/fluid/inference/api/paddle_inference_api.h" +#include "paddle/fluid/platform/port.h" #include "paddle/fluid/string/printf.h" namespace paddle { diff --git a/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc b/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc index c4022225fd4526998af8526d0afb87e7a5be6336..da42688f29f044639d6d97ff09f932490d4c3d54 100644 --- a/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc +++ b/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include #include #include #include diff --git a/paddle/fluid/inference/tests/book/test_inference_nlp.cc b/paddle/fluid/inference/tests/book/test_inference_nlp.cc index cbcfc964c91c33ab41a72ad7fec759086ad887cc..5c1204b9e6b78e42d999b12a2b7be6f822ecf818 100644 --- a/paddle/fluid/inference/tests/book/test_inference_nlp.cc +++ b/paddle/fluid/inference/tests/book/test_inference_nlp.cc @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include #include #include // NOLINT diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index 2118fcfd4bb1589947617e462f09971fcc090b98..75fa611c0d701dd81dfe5b33231655e0959c7dbf 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -20,6 +20,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/inference/io.h" +#include "paddle/fluid/platform/port.h" #include "paddle/fluid/platform/profiler.h" DECLARE_bool(use_mkldnn); diff --git a/paddle/fluid/operators/beam_search_op_test.cc b/paddle/fluid/operators/beam_search_op_test.cc index 501807e7f3e04ae75386bfa00797d244cd9eac9c..80fdd22fbbc068b76c20c2ac6199461b094d0ac2 100644 --- a/paddle/fluid/operators/beam_search_op_test.cc +++ b/paddle/fluid/operators/beam_search_op_test.cc @@ -30,23 +30,23 @@ using std::endl; void CreateInput(LoDTensor* ids, LoDTensor* scores) { LoD lod; - vector level0({0, 2, 4}); - vector level1({0, 1, 2, 3, 4}); + vector level0{0, 2, 4}; + vector level1{0, 1, 2, 3, 4}; lod.push_back(level0); lod.push_back(level1); ids->set_lod(lod); scores->set_lod(lod); - auto dims = framework::make_ddim(vector({4, 3})); + auto dims = framework::make_ddim(vector{4, 3}); ids->Resize(dims); scores->Resize(dims); CPUPlace place; auto* ids_data = ids->mutable_data(place); auto* scores_data = scores->mutable_data(place); - vector _ids({4, 2, 5, 2, 1, 3, 3, 5, 2, 8, 2, 1}); - vector _scores( - {0.5, 0.3, 0.2, 0.6, 0.3, 0.1, 0.9, 0.5, 0.1, 0.7, 0.5, 0.1}); + vector _ids{4, 2, 5, 2, 1, 3, 3, 5, 2, 8, 2, 1}; + vector _scores{0.5f, 0.3f, 0.2f, 0.6f, 0.3f, 0.1f, + 0.9f, 0.5f, 0.1f, 0.7f, 0.5f, 0.1f}; for (int i = 0; i < 12; i++) { ids_data[i] = _ids[i]; @@ -79,8 +79,8 @@ TEST(DISABLED_beam_search_op, run) { ASSERT_EQ(sids.lod(), sscores.lod()); - vector tids({4, 2, 3, 8}); - vector tscores({0.5, 0.6, 0.9, 0.7}); + vector tids{4, 2, 3, 8}; + vector tscores{0.5f, 0.6f, 0.9f, 0.7f}; for (int i = 0; i < 4; i++) { ASSERT_EQ(tids[i], sids.data()[i]); diff --git a/paddle/fluid/operators/distributed/grpc_client.cc b/paddle/fluid/operators/distributed/grpc_client.cc index c28f86146d3040c6a26cabfb795eff67375d4b76..3548d5d9fb7743227e5509e846bd5bf0f184eca5 100644 --- a/paddle/fluid/operators/distributed/grpc_client.cc +++ b/paddle/fluid/operators/distributed/grpc_client.cc @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include #include "glog/logging.h" // For VLOG @@ -20,6 +19,7 @@ limitations under the License. */ #include "paddle/fluid/operators/distributed/grpc_client.h" #include "paddle/fluid/operators/distributed/grpc_serde.h" #include "paddle/fluid/operators/distributed/request_handler.h" +#include "paddle/fluid/platform/port.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { diff --git a/paddle/fluid/operators/distributed/grpc_serde.cc b/paddle/fluid/operators/distributed/grpc_serde.cc index f27b70a5a3dd2927b51a95af7bd1b84a6e232f86..e6856676d49e867214801810949076151e34356a 100644 --- a/paddle/fluid/operators/distributed/grpc_serde.cc +++ b/paddle/fluid/operators/distributed/grpc_serde.cc @@ -15,7 +15,6 @@ limitations under the License. */ #ifdef PADDLE_WITH_CUDA #include #endif -#include #include // NOLINT #include "google/protobuf/io/coded_stream.h" @@ -26,6 +25,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/distributed/grpc_variable_response.h" #include "paddle/fluid/operators/distributed/proto_encoder_helper.h" #include "paddle/fluid/operators/distributed/sendrecvop_utils.h" +#include "paddle/fluid/platform/port.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { diff --git a/paddle/fluid/operators/distributed/grpc_serde.h b/paddle/fluid/operators/distributed/grpc_serde.h index 7ec489e961630747ba00e68ad3603cacbb1aa485..17290d3fb4478191c59623913a82d4142d3c49f9 100644 --- a/paddle/fluid/operators/distributed/grpc_serde.h +++ b/paddle/fluid/operators/distributed/grpc_serde.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include + #include #include #include @@ -25,6 +25,7 @@ limitations under the License. */ #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/operators/distributed/sendrecvop_utils.h" +#include "paddle/fluid/platform/port.h" #include "paddle/fluid/operators/distributed/send_recv.grpc.pb.h" #include "paddle/fluid/operators/distributed/send_recv.pb.h" diff --git a/paddle/fluid/operators/distributed/sendrecvop_utils.cc b/paddle/fluid/operators/distributed/sendrecvop_utils.cc index 374fa680e3681d2e4b1d7513a9522810a15fe485..0abebb92401a799f34ed9808c9cb664470732233 100644 --- a/paddle/fluid/operators/distributed/sendrecvop_utils.cc +++ b/paddle/fluid/operators/distributed/sendrecvop_utils.cc @@ -15,12 +15,12 @@ limitations under the License. */ #ifdef PADDLE_WITH_CUDA #include #endif -#include #include // NOLINT #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/operators/distributed/sendrecvop_utils.h" #include "paddle/fluid/operators/distributed/variable_response.h" +#include "paddle/fluid/platform/port.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/distributed/sendrecvop_utils.h b/paddle/fluid/operators/distributed/sendrecvop_utils.h index 480fc59c4281edbfa5f08e07a86c5f1257adb4be..523e56fe3e4eaf9d4faa8f0bf91d0b6075e0b0e5 100644 --- a/paddle/fluid/operators/distributed/sendrecvop_utils.h +++ b/paddle/fluid/operators/distributed/sendrecvop_utils.h @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include #include #include #include @@ -24,6 +23,7 @@ limitations under the License. */ #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/platform/port.h" #include "paddle/fluid/operators/distributed/send_recv.pb.h" diff --git a/paddle/fluid/operators/group_norm_op.cc b/paddle/fluid/operators/group_norm_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..6322659b67f6aeaeae3e29135fd52e08bf21ead1 --- /dev/null +++ b/paddle/fluid/operators/group_norm_op.cc @@ -0,0 +1,162 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/group_norm_op.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+using LoDTensor = framework::LoDTensor;
+using DataLayout = framework::DataLayout;
+
+class GroupNormOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of GroupNormOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Y"),
+                   "Output(Y) of GroupNormOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Mean"),
+                   "Output(Mean) of GroupNormOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Variance"),
+                   "Output(Variance) of GroupNormOp should not be null.");
+
+    auto x_dim = ctx->GetInputDim("X");
+    auto channel_num = x_dim[1];
+    auto batch_size = x_dim[0];
+    auto groups = ctx->Attrs().Get<int>("groups");
+    PADDLE_ENFORCE_LE(
+        groups, channel_num,
+        "'groups' must be less than or equal to the number of channels.");
+    PADDLE_ENFORCE_GE(groups, 1,
+                      "'groups' must be greater than or equal to 1.");
+
+    if (ctx->HasInput("Scale")) {
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL);
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], channel_num);
+    }
+    if (ctx->HasInput("Bias")) {
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL);
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], channel_num);
+    }
+
+    ctx->SetOutputDim("Y", ctx->GetInputDim("X"));
+    ctx->SetOutputDim("Mean", {batch_size, groups});
+    ctx->SetOutputDim("Variance", {batch_size, groups});
+    ctx->ShareLoD("X", "Y");
+  }
+};
+
+class GroupNormOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X", "The input tensor.");
+    AddInput("Scale",
+             "Scale is a 1-dimensional tensor of size C "
+             "that is applied to the output.")
+        .AsDispensable();
+    AddInput("Bias",
+             "Bias is a 1-dimensional tensor of size C "
+             "that is applied to the output.")
+        .AsDispensable();
+    AddOutput("Y", "Result after normalization.");
+    AddOutput("Mean", "Mean of each group.").AsIntermediate();
+    AddOutput("Variance", "Variance of each group.").AsIntermediate();
+
+    AddAttr<float>("epsilon",
+                   "Constant for numerical stability [default 1e-5].")
+        .SetDefault(1e-5)
+        .AddCustomChecker([](const float &epsilon) {
+          PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 1.0f,
+                         "'epsilon' should be between 0.0 and 1.0.");
+        });
+    AddAttr<int>("groups",
+                 "The number of groups into which the channels are divided.")
+        .AddCustomChecker([](const int &groups) {
+          PADDLE_ENFORCE_GT(groups, 0,
+                            "'groups' should be greater than zero.");
+        });
+
+    AddComment(R"DOC(
+Group Normalization
+
+Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_
+)DOC");
+  }
+};
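To make the Mean/Variance semantics of this op concrete: each (batch, group) pair gets its own mean and biased variance, and the output is the per-group standardized input with an optional per-channel affine transform. A NumPy sketch of the forward pass; the helper name is ours for illustration, and it assumes NCHW input with C divisible by `groups` (the kernels below also handle a ragged last group):

```python
import numpy as np

def group_norm_ref(x, groups, scale=None, bias=None, epsilon=1e-5):
    # x: (N, C, H, W); each of the `groups` channel groups is normalized
    # by its own statistics, matching the op's Mean/Variance outputs.
    n, c, h, w = x.shape
    g = x.reshape((n, groups, -1))
    mean = g.mean(axis=2)                 # shape (N, groups)
    var = g.var(axis=2)                   # biased variance, E[x^2] - mean^2
    y = (g - mean[:, :, None]) / np.sqrt(var[:, :, None] + epsilon)
    y = y.reshape((n, c, h, w))
    if scale is not None:                 # per-channel affine, size C
        y = y * scale.reshape((1, c, 1, 1))
    if bias is not None:
        y = y + bias.reshape((1, c, 1, 1))
    return y, mean, var
```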
+class GroupNormGradOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    // check input
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of GroupNormOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Mean"),
+                   "Input(Mean) of GroupNormOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Variance"),
+                   "Input(Variance) of GroupNormOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
+                   "Input(Y@GRAD) of GroupNormOp should not be null.");
+
+    // check output
+    if (ctx->HasOutput(framework::GradVarName("X"))) {
+      ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
+    }
+    if (ctx->HasOutput(framework::GradVarName("Scale"))) {
+      ctx->SetOutputDim(framework::GradVarName("Scale"),
+                        ctx->GetInputDim("Scale"));
+    }
+    if (ctx->HasOutput(framework::GradVarName("Bias"))) {
+      ctx->SetOutputDim(framework::GradVarName("Bias"),
+                        ctx->GetInputDim("Bias"));
+    }
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext &ctx) const override {
+    const auto *var = ctx.InputVar(framework::GradVarName("Y"));
+    if (var == nullptr) {
+      PADDLE_THROW("can't find Y@GRAD");
+    }
+    const Tensor *t = nullptr;
+    if (var->IsType<Tensor>()) {
+      t = &var->Get<Tensor>();
+    } else if (var->IsType<LoDTensor>()) {
+      t = &var->Get<LoDTensor>();
+    }
+    if (t == nullptr) {
+      PADDLE_THROW("can't find Y@GRAD");
+    }
+    return framework::OpKernelType(framework::ToDataType(t->type()),
+                                   ctx.GetPlace());
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OPERATOR(group_norm, ops::GroupNormOp, ops::GroupNormOpMaker,
+                  paddle::framework::DefaultGradOpDescMaker<true>);
+REGISTER_OPERATOR(group_norm_grad, ops::GroupNormGradOp);
+REGISTER_OP_CPU_KERNEL(
+    group_norm,
+    ops::GroupNormKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::GroupNormKernel<paddle::platform::CPUDeviceContext, double>);
+REGISTER_OP_CPU_KERNEL(
+    group_norm_grad,
+    ops::GroupNormGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::GroupNormGradKernel<paddle::platform::CPUDeviceContext, double>);
diff --git a/paddle/fluid/operators/group_norm_op.cu b/paddle/fluid/operators/group_norm_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..27174630227c8123a31cb1c70d5eb5f5b3ee5107
--- /dev/null
+++ b/paddle/fluid/operators/group_norm_op.cu
@@ -0,0 +1,292 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/ + +#include +#include "paddle/fluid/operators/group_norm_op.h" + +namespace paddle { +namespace operators { + +template +__global__ void GroupNormForwardGetMeanAndVar(const T* x, int N, int C, + int imsize, int groups, + int group_size, T* mean, T* var) { + int gid = blockIdx.y; + int cid = blockIdx.x; + int bid = blockIdx.z; + int number = min(group_size, static_cast(C - gid * group_size)); + int ccid = gid * group_size + cid; + if (ccid >= C) return; + T x_mean = 0, x_var = 0; + for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { + T val = x[(bid * C + ccid) * imsize + imid]; + x_mean += val; + x_var += val * val; + } + x_mean /= number * imsize; + x_var /= number * imsize; + __shared__ T s_mem[2]; + if (threadIdx.x == 0) { + s_mem[0] = s_mem[1] = 0; + } + __syncthreads(); + paddle::platform::CudaAtomicAdd(&s_mem[0], x_mean); + paddle::platform::CudaAtomicAdd(&s_mem[1], x_var); + __syncthreads(); + if (threadIdx.x == 0) { + paddle::platform::CudaAtomicAdd(&mean[bid * groups + gid], s_mem[0]); + paddle::platform::CudaAtomicAdd(&var[bid * groups + gid], s_mem[1]); + } +} + +template +__global__ void GroupNormForward(const T* x, const T* mean, const T* var, + const T* scale, const T* bias, int N, int C, + int imsize, int groups, int group_size, + T epsilon, T* y, T* real_var) { + int gid = blockIdx.y; + int cid = blockIdx.x; + int bid = blockIdx.z; + int ccid = gid * group_size + cid; + if (ccid >= C) return; + T x_mean = mean[bid * groups + gid]; + T x_var = var[bid * groups + gid]; + x_var = x_var - x_mean * x_mean; + T var_inv = 1.0 / sqrt(x_var + epsilon); + if (cid == 0 && threadIdx.x == 0) real_var[bid * groups + gid] = x_var; + for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { + T val = x[(bid * C + ccid) * imsize + imid]; + val = (val - x_mean) * var_inv; + if (scale) val *= scale[gid * group_size + cid]; + if (bias) val += bias[gid * group_size + cid]; + y[(bid * C + ccid) * imsize + imid] = val; + } +} + +template +class GroupNormKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto* scale = ctx.Input("Scale"); + auto* bias = ctx.Input("Bias"); + auto* x = ctx.Input("X"); + + auto* y = ctx.Output("Y"); + auto* mean = ctx.Output("Mean"); + auto* var = ctx.Output("Variance"); + const auto groups = ctx.Attr("groups"); + + const auto x_dims = x->dims(); + const int group_size = (x_dims[1] - 1) / groups + 1; + + y->mutable_data(ctx.GetPlace()); + mean->mutable_data(ctx.GetPlace()); + var->mutable_data(ctx.GetPlace()); + math::SetConstant set_zero; + auto& dev_ctx = ctx.template device_context(); + Tensor temp_var; + temp_var.mutable_data(var->dims(), ctx.GetPlace()); + + set_zero(dev_ctx, mean, static_cast(0)); + set_zero(dev_ctx, &temp_var, static_cast(0)); + + auto* x_data = x->data(); + auto* y_data = y->data(); + auto* mean_data = mean->data(); + auto* var_data = var->data(); + auto* temp_var_data = temp_var.data(); + + const T* scale_data = nullptr; + if (scale) scale_data = scale->data(); + const T* bias_data = nullptr; + if (bias) bias_data = bias->data(); + + int imsize = x_dims[2] * x_dims[3]; + int block_size = std::min(512, imsize); + dim3 grid(group_size, groups, x_dims[0]); + dim3 threads(block_size, 1, 1); + GroupNormForwardGetMeanAndVar<<>>( + x_data, x_dims[0], x_dims[1], imsize, groups, group_size, mean_data, + temp_var_data); + GroupNormForward<<>>( + x_data, mean_data, temp_var_data, scale_data, bias_data, x_dims[0], 
+ x_dims[1], imsize, groups, group_size, epsilon, y_data, var_data); + } +}; + +template +__global__ void GroupNormBackwardGetMeanAndVar( + const T* x, const T* mean, const T* var, const T* scale, const T* d_y, + int N, int C, int imsize, int groups, int group_size, T epsilon, T* d_x, + T* d_mean, T* d_var, T* d_scale, T* d_bias) { + int gid = blockIdx.y; + int cid = blockIdx.x; + int bid = blockIdx.z; + int number = min(group_size, static_cast(C - gid * group_size)); + int ccid = gid * group_size + cid; + if (ccid >= C) return; + T x_mean = mean[bid * groups + gid]; + T x_var = var[bid * groups + gid]; + T var_inv = 1.0 / sqrt(x_var + epsilon); + T d_var_inv = 0, d_x_mean = 0; + T d_mean_data = 0, d_var_data = 0, d_scale_data = 0, d_bias_data = 0; + + for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { + T tmp = x[(bid * C + ccid) * imsize + imid]; + T val = (tmp - x_mean) * var_inv; + T dval = d_y[(bid * C + ccid) * imsize + imid]; + if (d_bias) d_bias_data += dval; + if (d_scale) d_scale_data += val * dval; + if (scale) dval = dval * scale[ccid]; + d_var_data += (tmp - x_mean) * dval; + T d_tmp = dval * var_inv; + if (d_x) d_x[(bid * C + ccid) * imsize + imid] = d_tmp; + d_mean_data -= d_tmp; + } + + __shared__ T s_mem[4]; + if (threadIdx.x == 0) { + s_mem[0] = s_mem[1] = 0; + if (d_scale) s_mem[2] = 0; + if (d_bias) s_mem[3] = 0; + } + __syncthreads(); + paddle::platform::CudaAtomicAdd(&s_mem[0], d_mean_data); + paddle::platform::CudaAtomicAdd(&s_mem[1], d_var_data); + if (d_scale) paddle::platform::CudaAtomicAdd(&s_mem[2], d_scale_data); + if (d_bias) paddle::platform::CudaAtomicAdd(&s_mem[3], d_bias_data); + __syncthreads(); + if (threadIdx.x == 0) { + paddle::platform::CudaAtomicAdd(&d_mean[bid * groups + gid], s_mem[0]); + paddle::platform::CudaAtomicAdd(&d_var[bid * groups + gid], s_mem[1]); + if (d_scale) paddle::platform::CudaAtomicAdd(&d_scale[ccid], s_mem[2]); + if (d_bias) paddle::platform::CudaAtomicAdd(&d_bias[ccid], s_mem[3]); + } +} + +template +__global__ void GroupNormBackward(const T* x, const T* mean, const T* var, + const T* d_mean, const T* d_var, int N, int C, + int imsize, int groups, int group_size, + T epsilon, T* d_x) { + int gid = blockIdx.y; + int cid = blockIdx.x; + int bid = blockIdx.z; + int number = min(group_size, static_cast(C - gid * group_size)); + int ccid = gid * group_size + cid; + if (ccid >= C) return; + T x_mean = mean[bid * groups + gid]; + T x_var = var[bid * groups + gid]; + T d_x_mean = d_mean[bid * groups + gid]; + T d_var_inv = d_var[bid * groups + gid]; + + T d_x_var = + -1.0 / (2 * (x_var + epsilon) * sqrt(x_var + epsilon)) * d_var_inv; + d_x_mean -= 2 * d_x_var * x_mean; + d_x_var /= number * imsize; + d_x_mean /= number * imsize; + for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { + T tmp = x[(bid * C + ccid) * imsize + imid]; + if (d_x) + d_x[(bid * C + ccid) * imsize + imid] += d_x_mean + tmp * 2 * d_x_var; + } +} + +template +class GroupNormGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto* x = ctx.Input("X"); + auto* mean = ctx.Input("Mean"); + auto* var = ctx.Input("Variance"); + auto* scale = ctx.Input("Scale"); + auto* d_y = ctx.Input(framework::GradVarName("Y")); + const auto groups = ctx.Attr("groups"); + + // init output + auto* d_x = ctx.Output(framework::GradVarName("X")); + auto* d_scale = ctx.Output(framework::GradVarName("Scale")); + auto* d_bias = 
ctx.Output(framework::GradVarName("Bias")); + + const auto& x_dims = x->dims(); + const int group_size = (x_dims[1] - 1) / groups + 1; + + T* d_x_data = nullptr; + if (d_x) { + d_x->mutable_data(ctx.GetPlace()); + d_x_data = d_x->data(); + } + math::SetConstant set_zero; + auto& dev_ctx = ctx.template device_context(); + + Tensor temp_var; + temp_var.mutable_data(var->dims(), ctx.GetPlace()); + set_zero(dev_ctx, &temp_var, static_cast(0)); + T* temp_var_data = temp_var.data(); + + Tensor temp_mean; + temp_mean.mutable_data(var->dims(), ctx.GetPlace()); + set_zero(dev_ctx, &temp_mean, static_cast(0)); + T* temp_mean_data = temp_mean.data(); + + auto* x_data = x->data(); + auto* y_data = d_y->data(); + auto* mean_data = mean->data(); + auto* var_data = var->data(); + T* d_scale_data = nullptr; + if (d_scale) { + d_scale->mutable_data(ctx.GetPlace()); + set_zero(dev_ctx, d_scale, static_cast(0)); + d_scale_data = d_scale->data(); + } + T* d_bias_data = nullptr; + if (d_bias) { + d_bias->mutable_data(ctx.GetPlace()); + set_zero(dev_ctx, d_bias, static_cast(0)); + d_bias_data = d_bias->data(); + } + + const T* scale_data = nullptr; + if (scale) scale_data = scale->data(); + + int imsize = x_dims[2] * x_dims[3]; + int block_size = std::min(512, imsize); + dim3 grid(group_size, groups, x_dims[0]); + dim3 threads(block_size, 1, 1); + GroupNormBackwardGetMeanAndVar<<>>( + x_data, mean_data, var_data, scale_data, y_data, x_dims[0], x_dims[1], + imsize, groups, group_size, epsilon, d_x_data, temp_mean_data, + temp_var_data, d_scale_data, d_bias_data); + GroupNormBackward<<>>( + x_data, mean_data, var_data, temp_mean_data, temp_var_data, x_dims[0], + x_dims[1], imsize, groups, group_size, epsilon, d_x_data); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + group_norm, + ops::GroupNormKernel, + ops::GroupNormKernel); +REGISTER_OP_CUDA_KERNEL( + group_norm_grad, + ops::GroupNormGradKernel, + ops::GroupNormGradKernel); diff --git a/paddle/fluid/operators/group_norm_op.h b/paddle/fluid/operators/group_norm_op.h new file mode 100644 index 0000000000000000000000000000000000000000..3d6c6a46a9662e3b99b4e080b424b4794db7fcc3 --- /dev/null +++ b/paddle/fluid/operators/group_norm_op.h @@ -0,0 +1,197 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_function.h" +#include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +using DataLayout = framework::DataLayout; + +template +class GroupNormKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto* scale = ctx.Input("Scale"); + auto* bias = ctx.Input("Bias"); + auto* x = ctx.Input("X"); + + auto* y = ctx.Output("Y"); + auto* mean = ctx.Output("Mean"); + auto* var = ctx.Output("Variance"); + const auto groups = ctx.Attr("groups"); + + const auto x_dims = x->dims(); + const int group_size = (x_dims[1] - 1) / groups + 1; + + y->mutable_data(ctx.GetPlace()); + mean->mutable_data(ctx.GetPlace()); + var->mutable_data(ctx.GetPlace()); + + auto* x_data = x->data(); + auto* y_data = y->data(); + auto* mean_data = mean->data(); + auto* var_data = var->data(); + + const T* scale_data = nullptr; + if (scale) scale_data = scale->data(); + const T* bias_data = nullptr; + if (bias) bias_data = bias->data(); + + int imsize = x_dims[2] * x_dims[3]; + auto* iter_x_data = x_data; + auto* iter_y_data = y_data; + for (int bid = 0; bid < x_dims[0]; bid++) + for (int gid = 0; gid < groups; gid++) { + T x_mean = 0, x_var = 0; + int number = std::min(group_size, + static_cast(x_dims[1] - gid * group_size)); + auto* tmp = iter_x_data; + for (int cid = 0; cid < number; cid++) { + for (int imid = 0; imid < imsize; imid++, iter_x_data++) { + x_mean += iter_x_data[0]; + x_var += iter_x_data[0] * iter_x_data[0]; + } + } + x_mean /= number * imsize; + x_var /= number * imsize; + x_var = x_var - x_mean * x_mean; + T var_inv = 1.0 / sqrt(x_var + epsilon); + mean_data[bid * groups + gid] = x_mean; + var_data[bid * groups + gid] = x_var; + for (int cid = 0; cid < number; cid++) { + for (int imid = 0; imid < imsize; imid++, tmp++, iter_y_data++) { + T val = (tmp[0] - x_mean) * var_inv; + if (scale_data) val *= scale_data[gid * group_size + cid]; + if (bias_data) val += bias_data[gid * group_size + cid]; + iter_y_data[0] = val; + } + } + } + } +}; + +template +class GroupNormGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto* x = ctx.Input("X"); + auto* mean = ctx.Input("Mean"); + auto* var = ctx.Input("Variance"); + auto* scale = ctx.Input("Scale"); + auto* d_y = ctx.Input(framework::GradVarName("Y")); + const auto groups = ctx.Attr("groups"); + + // init output + auto* d_x = ctx.Output(framework::GradVarName("X")); + auto* d_scale = ctx.Output(framework::GradVarName("Scale")); + auto* d_bias = ctx.Output(framework::GradVarName("Bias")); + + const auto& x_dims = x->dims(); + const int group_size = (x_dims[1] - 1) / groups + 1; + + // TODO(liangdun): need to check d_x is null + math::SetConstant set_zero; + auto& dev_ctx = ctx.template device_context(); + T* d_x_data = nullptr; + if (d_x) { + d_x->mutable_data(ctx.GetPlace()); + set_zero(dev_ctx, d_x, static_cast(0)); + d_x_data = d_x->data(); + } + + auto* x_data = x->data(); + auto* y_data = d_y->data(); + auto* mean_data = mean->data(); + auto* var_data = var->data(); + T* d_scale_data = 
nullptr; + if (d_scale) { + d_scale->mutable_data(ctx.GetPlace()); + set_zero(dev_ctx, d_scale, static_cast(0)); + d_scale_data = d_scale->data(); + } + T* d_bias_data = nullptr; + if (d_bias) { + d_bias->mutable_data(ctx.GetPlace()); + set_zero(dev_ctx, d_bias, static_cast(0)); + d_bias_data = d_bias->data(); + } + + const T* scale_data = nullptr; + if (scale) scale_data = scale->data(); + + int imsize = x_dims[2] * x_dims[3]; + auto* iter_x_data = x_data; + auto* iter_d_x_data = d_x_data; + auto* iter_y_data = y_data; + for (int bid = 0; bid < x_dims[0]; bid++) + for (int gid = 0; gid < groups; gid++) { + T x_mean = mean_data[bid * groups + gid]; + T x_var = var_data[bid * groups + gid]; + T var_inv = 1.0 / sqrt(x_var + epsilon); + int number = std::min(group_size, + static_cast(x_dims[1] - gid * group_size)); + auto* tmp = iter_x_data; + auto* tmp2 = iter_d_x_data; + T d_var_inv = 0, d_x_mean = 0; + for (int cid = 0; cid < number; cid++) { + for (int imid = 0; imid < imsize; + imid++, tmp++, iter_y_data++, iter_d_x_data++) { + T val = (tmp[0] - x_mean) * var_inv; + T dval = iter_y_data[0]; + if (d_bias_data) d_bias_data[gid * group_size + cid] += dval; + if (d_scale_data) + d_scale_data[gid * group_size + cid] += val * dval; + if (scale_data) dval = scale_data[gid * group_size + cid] * dval; + + d_var_inv += (tmp[0] - x_mean) * dval; + T d_tmp = dval * var_inv; + if (d_x_data) iter_d_x_data[0] += d_tmp; + d_x_mean -= d_tmp; + } + } + + T d_x_var = + -1.0 / (2 * (x_var + epsilon) * sqrt(x_var + epsilon)) * d_var_inv; + d_x_mean -= 2 * d_x_var * x_mean; + d_x_var /= number * imsize; + d_x_mean /= number * imsize; + + iter_d_x_data = tmp2; + + if (d_x_data) { + for (int cid = 0; cid < number; cid++) { + for (int imid = 0; imid < imsize; + imid++, iter_x_data++, iter_d_x_data++) { + iter_d_x_data[0] += d_x_mean; + iter_d_x_data[0] += iter_x_data[0] * 2 * d_x_var; + } + } + } + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/cpu_vec_test.cc b/paddle/fluid/operators/math/cpu_vec_test.cc index 18a586f8dd9f01357d9facca19c51ed5c293ffd2..ad734bae425f3b3edf3ed57474285c1b5a754416 100644 --- a/paddle/fluid/operators/math/cpu_vec_test.cc +++ b/paddle/fluid/operators/math/cpu_vec_test.cc @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include #include #include @@ -22,6 +21,7 @@ limitations under the License. */ #include "gtest/gtest.h" #include "paddle/fluid/operators/math/cpu_vec.h" +#include "paddle/fluid/platform/port.h" inline double GetCurrentUS() { struct timeval time; diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc index ae2c90b33a4298ada4fd01aa2a44ebdf10d036d4..521cd7801abd6bea0c4f27422141742921dddf53 100644 --- a/paddle/fluid/operators/math/im2col_test.cc +++ b/paddle/fluid/operators/math/im2col_test.cc @@ -14,9 +14,9 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/math/im2col.h" #include -#include #include #include "paddle/fluid/operators/math/im2col_cfo_cpu.h" +#include "paddle/fluid/platform/port.h" template void testIm2col() { diff --git a/paddle/fluid/operators/math/jit_kernel_test.cc b/paddle/fluid/operators/math/jit_kernel_test.cc index b6c62a26348cdc20582cf7465f93026402051587..8662e1c50dea48005dab8b10e3e8dc0dddc9152c 100644 --- a/paddle/fluid/operators/math/jit_kernel_test.cc +++ b/paddle/fluid/operators/math/jit_kernel_test.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/jit_kernel.h" -#include #include // for exp #include // for memcpy #include @@ -22,6 +21,7 @@ limitations under the License. */ #include "gflags/gflags.h" #include "glog/logging.h" #include "gtest/gtest.h" +#include "paddle/fluid/platform/port.h" #ifdef PADDLE_WITH_MKLML #include "paddle/fluid/platform/dynload/mklml.h" diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index 682b0c0ff39b71f08fe1a8b0c9c7b7d386b67738..61a25064d17994e3ce5853017263f24a859c69be 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -62,7 +62,7 @@ inline const char* cudnnGetErrorString(cudnnStatus_t status) { #define CUDNN_ENFORCE(condition) \ do { \ - cudnnStatus_t status = condition; \ + auto status = condition; \ if (UNLIKELY(status != CUDNN_STATUS_SUCCESS)) { \ PADDLE_THROW(::paddle::platform::cudnnGetErrorString(status)); \ } \ diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index 1a83ac7780a01fd3c20bc85baaf14e6ca3f8eb8c..db62377898339def415a13d185f85f34de326d7f 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -48,13 +48,13 @@ extern void EnforceCUDNNLoaded(const char* fn_name); #else -#define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - inline cudnnStatus_t operator()(Args... args) { \ - return ::__name(args...); \ - } \ - }; \ +#define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + inline auto operator()(Args... args) { \ + return ::__name(args...); \ + } \ + }; \ extern DynLoad__##__name __name #endif diff --git a/paddle/fluid/platform/gpu_info.cc b/paddle/fluid/platform/gpu_info.cc index c78f159ad25a17b38333a57a0650d9843c4c5632..e0d0051ad0d8f552d34ac9f193f36c39fa8ac1a6 100644 --- a/paddle/fluid/platform/gpu_info.cc +++ b/paddle/fluid/platform/gpu_info.cc @@ -19,7 +19,10 @@ limitations under the License. */ #include "gflags/gflags.h" #include "paddle/fluid/platform/enforce.h" -DEFINE_double(fraction_of_gpu_memory_to_use, 0.92, +// fraction_of_gpu_memory_to_use cannot be too high on windows, +// since the win32 graphic sub-system can occupy some GPU memory +// which may lead to insufficient memory left for paddle +DEFINE_double(fraction_of_gpu_memory_to_use, 0.5, "Allocate a trunk of gpu memory that is this fraction of the " "total gpu memory size. Future memory usage will be allocated " "from the trunk. 
diff --git a/paddle/fluid/platform/stream_callback_manager.h b/paddle/fluid/platform/stream_callback_manager.h index 11c68f3449ee26b64c121acd081479b37c94fac4..8dcfc4e748f27d202ced34e7efd7c411f29874a4 100644 --- a/paddle/fluid/platform/stream_callback_manager.h +++ b/paddle/fluid/platform/stream_callback_manager.h @@ -18,7 +18,7 @@ #include <cuda.h> #include <cuda_runtime.h> #include <functional> -#include "ThreadPool.h" +#include <ThreadPool.h> #include "paddle/fluid/platform/enforce.h" namespace paddle { diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 6cc3a1739a5cc4d4e87679ec4e345edefba47a82..795800fd51763759c0f660e3eb60625afe669881 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -37,6 +37,7 @@ limitations under the License. */ #include "paddle/fluid/memory/allocation/allocator_strategy.h" #include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h" +#include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" @@ -86,6 +87,9 @@ bool IsCompiledWithDIST() { } PYBIND11_PLUGIN(core) { + // Not used, just make sure cpu_info.cc is linked. + paddle::platform::CpuTotalPhysicalMemory(); + paddle::memory::allocation::UseAllocatorStrategyGFlag(); py::module m("core", "C++ core of PaddlePaddle"); diff --git a/paddle/legacy/cuda/include/hl_warpctc_wrap.h b/paddle/legacy/cuda/include/hl_warpctc_wrap.h index 0857bd1aa1b3c66a5692b25005b7c342669afea6..09cbd6d450f77f510ad28fd0e9e86efa19c5328d 100644 --- a/paddle/legacy/cuda/include/hl_warpctc_wrap.h +++ b/paddle/legacy/cuda/include/hl_warpctc_wrap.h @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#ifndef _WIN32 #ifndef HL_WARPCTC_WRAP_H_ #define HL_WARPCTC_WRAP_H_ - #include "ctc.h" #include "hl_base.h" @@ -91,3 +91,4 @@ extern void hl_warpctc_get_workspace_size(const int* cpuLabelLengths, size_t* bytes); #endif // HL_WARPCTC_WRAP_H_ +#endif diff --git a/paddle/legacy/cuda/src/hl_cuda_device.cc b/paddle/legacy/cuda/src/hl_cuda_device.cc index 501e3b0f3be02b9364f9182b2484d542f0f39889..a6e27a37ffef9828de80613facc737dee816f5ce 100644 --- a/paddle/legacy/cuda/src/hl_cuda_device.cc +++ b/paddle/legacy/cuda/src/hl_cuda_device.cc @@ -132,11 +132,15 @@ inline pid_t gettid() { uint64_t tid; pthread_threadid_np(NULL, &tid); #else +#ifndef _WIN32 #ifndef __NR_gettid #define __NR_gettid 224 #endif pid_t tid = syscall(__NR_gettid); #endif +#else // _WIN32 + pid_t tid = _getpid(); +#endif // _WIN32 CHECK_NE((int)tid, -1); return tid; } diff --git a/paddle/legacy/utils/ThreadLocal.h b/paddle/legacy/utils/ThreadLocal.h index c5b07506d36875ead65887ea2e221e762be0d621..6268b73a85540c25d93b07f2c3aad74c1802aa72 100644 --- a/paddle/legacy/utils/ThreadLocal.h +++ b/paddle/legacy/utils/ThreadLocal.h @@ -14,10 +14,12 @@ limitations under the License. */ #pragma once +#ifndef _WIN32 #include <pthread.h> #include <sys/syscall.h> -#include <sys/types.h> #include <unistd.h> +#endif +#include <sys/types.h> #include <map> #include <mutex> #include <random> diff --git a/paddle/legacy/utils/Util.h b/paddle/legacy/utils/Util.h index e6f05e30d308b8b94935897e947350934a5971ee..3a878b2b30127f0ff4c785a1720f27849eca6fda 100644 --- a/paddle/legacy/utils/Util.h +++ b/paddle/legacy/utils/Util.h @@ -14,7 +14,9 @@ limitations under the License.
*/ #pragma once +#ifndef _WIN32 #include <sys/syscall.h> // for syscall() +#endif #include <sys/types.h> #include <algorithm> #include <cmath> @@ -40,6 +42,31 @@ inline int rand_r(unsigned int* seedp) { } #endif +#ifdef _WIN32 +#define NOMINMAX // msvc max/min macro conflict with std::min/max +#include <windows.h> + +template <typename T> +inline int __builtin_clz(const T& value) { + DWORD leading_zero = 0; + if (_BitScanReverse(&leading_zero, value)) { + return static_cast<int>(sizeof(T) * 8 - leading_zero - 1); + } else { + return static_cast<int>(0); + } +} + +inline int __builtin_clzl(const unsigned long& value) { + return __builtin_clz(value); +} + +inline int __builtin_clzll(const unsigned long long& value) { + DWORD leading_zero = 0; + if (_BitScanReverse64(&leading_zero, value)) { + return static_cast<int>(63 - leading_zero); + } else { + return 64; + } +} + +#define pid_t int +#endif + /** * Loop over the elements in a container * TODO(yuyang18): Is this foreach useful? Why not use C++ 11 foreach, diff --git a/paddle/testing/CMakeLists.txt b/paddle/testing/CMakeLists.txt index 22644818994134d4797edfae8d156a005c103d52..614596958e3c522bca3d692764487106ba1e223f 100644 --- a/paddle/testing/CMakeLists.txt +++ b/paddle/testing/CMakeLists.txt @@ -3,8 +3,10 @@ if(WITH_TESTING) add_library(paddle_test_main STATIC TestMain.cpp) add_dependencies(paddle_test_main paddle_proto ${external_project_dependencies}) - add_library(paddle_test_util STATIC TestUtil.cpp) - add_dependencies(paddle_test_util paddle_proto ${external_project_dependencies}) + if(NOT WIN32) + add_library(paddle_test_util STATIC TestUtil.cpp) + add_dependencies(paddle_test_util paddle_proto ${external_project_dependencies}) + endif(NOT WIN32) if(NOT MOBILE_INFERENCE) cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS device_context memory gtest gflags) endif() diff --git a/python/paddle/fluid/contrib/utils/__init__.py b/python/paddle/fluid/contrib/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..df6d367782327f7b22e72ab88d6b6cc26c9d5bc9 --- /dev/null +++ b/python/paddle/fluid/contrib/utils/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +from . import hdfs_utils +from .hdfs_utils import * + +__all__ = hdfs_utils.__all__
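With hdfs_utils re-exported by the package __init__ above, callers can import the client straight from paddle.fluid.contrib.utils. A minimal usage sketch; the hadoop home, namenode address, credentials, and paths below are placeholders, mirroring the __main__ demo at the bottom of hdfs_utils.py:

from paddle.fluid.contrib.utils import HDFSClient

configs = {
    "fs.default.name": "hdfs://namenode.example.com:54310",  # placeholder
    "hadoop.job.ugi": "user,password",                       # placeholder
}
client = HDFSClient("/path/to/hadoop", configs)
if not client.is_exist("/user/models"):
    client.makedirs("/user/models")
client.upload("/user/models", "/tmp/model.tar", overwrite=False)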
diff --git a/python/paddle/fluid/contrib/utils/hdfs_utils.py b/python/paddle/fluid/contrib/utils/hdfs_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..251665d85e166f4ebf66eced7a5889ee9fc23e08 --- /dev/null +++ b/python/paddle/fluid/contrib/utils/hdfs_utils.py @@ -0,0 +1,505 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""HDFS Utils""" + +import os +import subprocess +import multiprocessing +from datetime import datetime + +import re +import copy +import errno + +import logging + +__all__ = ["HDFSClient", "multi_download"] + +logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s') +_logger = logging.getLogger("hdfs_utils") +_logger.setLevel(logging.INFO) + + +class HDFSClient(object): + def __init__(self, hadoop_home, configs): + self.pre_commands = [] + hadoop_bin = '%s/bin/hadoop' % hadoop_home + self.pre_commands.append(hadoop_bin) + dfs = 'fs' + self.pre_commands.append(dfs) + + for k, v in configs.items(): + config_command = '-D%s=%s' % (k, v) + self.pre_commands.append(config_command) + + def __run_hdfs_cmd(self, commands, retry_times=5): + whole_commands = copy.deepcopy(self.pre_commands) + whole_commands.extend(commands) + + print('Running system command: {0}'.format(' '.join(whole_commands))) + + ret_code = 0 + ret_out = None + ret_err = None + for x in range(retry_times + 1): + proc = subprocess.Popen( + whole_commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (output, errors) = proc.communicate() + ret_code, ret_out, ret_err = proc.returncode, output, errors + if ret_code: + _logger.warn( + 'Times: %d, Error running command: %s. Return code: %d, Error: %s' + % (x, ' '.join(whole_commands), proc.returncode, errors)) + else: + break + return ret_code, ret_out, ret_err + + def upload(self, hdfs_path, local_path, overwrite=False, retry_times=5): + """ + upload a local file to hdfs + args: + hdfs_path: the destination directory on hdfs + local_path: the local file to upload + overwrite: whether to overwrite an existing remote file + retry_times: how many times to retry the hadoop command + return: + True or False + """ + assert hdfs_path is not None + assert local_path is not None and os.path.exists(local_path) + + if os.path.isdir(local_path): + _logger.warn( + "The local path: {} is a directory; uploading a directory is not supported yet, return". + format(local_path)) + return + + base = os.path.basename(local_path) + if not self.is_exist(hdfs_path): + self.makedirs(hdfs_path) + else: + if self.is_exist(os.path.join(hdfs_path, base)): + if overwrite: + _logger.error( + "The HDFS path: {} already exists and overwrite is True; deleting it". + format(hdfs_path)) + self.delete(hdfs_path) + else: + _logger.error( + "The HDFS path: {} already exists and overwrite is False, return". + format(hdfs_path)) + return False + + put_commands = ["-put", local_path, hdfs_path] + returncode, output, errors = self.__run_hdfs_cmd(put_commands, + retry_times) + if returncode: + _logger.error("Put local path: {} to HDFS path: {} failed".format( + local_path, hdfs_path)) + return False + else: + _logger.info("Put local path: {} to HDFS path: {} successfully". + format(local_path, hdfs_path)) + return True + + def download(self, hdfs_path, local_path, overwrite=False, unzip=False): + """ + download a file from hdfs + args: + hdfs_path: the remote file on hdfs + local_path: the local directory to download into + overwrite: whether to overwrite an existing local file + unzip: reserved, currently unused + return: + True or False + """ + _logger.info('Downloading %r to %r.', hdfs_path, local_path) + + if not self.is_exist(hdfs_path): + _logger.error("HDFS path: {} does not exist".format(hdfs_path)) + return False + if self.is_dir(hdfs_path): + _logger.error( + "The HDFS path: {} is a directory; downloading a directory is not supported yet, return".
+ format(hdfs_path)) + return False + + if os.path.exists(local_path): + base = os.path.basename(hdfs_path) + local_file = os.path.join(local_path, base) + if os.path.exists(local_file): + if overwrite: + os.remove(local_file) + else: + _logger.error( + "The local file: {} already exists and overwrite is False, return". + format(local_file)) + return False + + self.make_local_dirs(local_path) + + download_commands = ["-get", hdfs_path, local_path] + returncode, output, errors = self.__run_hdfs_cmd(download_commands) + if returncode: + _logger.error("Get local path: {} from HDFS path: {} failed".format( + local_path, hdfs_path)) + return False + else: + _logger.info("Get local path: {} from HDFS path: {} successfully". + format(local_path, hdfs_path)) + return True + + def is_exist(self, hdfs_path=None): + """ + whether the remote hdfs path exists + args: + hdfs_path: the hdfs file path + return: + True or False + """ + exist_cmd = ['-test', '-e', hdfs_path] + returncode, output, errors = self.__run_hdfs_cmd( + exist_cmd, retry_times=1) + + if returncode: + _logger.error("HDFS is_exist HDFS path: {} failed".format( + hdfs_path)) + return False + else: + _logger.info("HDFS is_exist HDFS path: {} successfully".format( + hdfs_path)) + return True + + def is_dir(self, hdfs_path=None): + """ + whether the remote hdfs path is a directory + args: + hdfs_path: the hdfs file path + return: + True or False + """ + + if not self.is_exist(hdfs_path): + return False + + dir_cmd = ['-test', '-d', hdfs_path] + returncode, output, errors = self.__run_hdfs_cmd(dir_cmd, retry_times=1) + + if returncode: + _logger.error("HDFS path: {} is not a directory".format( + hdfs_path)) + return False + else: + _logger.info("HDFS path: {} is a directory".format( + hdfs_path)) + return True + + def delete(self, hdfs_path): + """Remove a file or directory from HDFS. + + :param hdfs_path: HDFS path. + + This function returns `True` if the path no longer exists after the + call (including the case where it did not exist to begin with), and + `False` if the hadoop command failed. + + """ + _logger.info('Deleting %r.', hdfs_path) + + if not self.is_exist(hdfs_path): + _logger.warn("HDFS path: {} does not exist".format(hdfs_path)) + return True + + if self.is_dir(hdfs_path): + del_cmd = ['-rmr', hdfs_path] + else: + del_cmd = ['-rm', hdfs_path] + + returncode, output, errors = self.__run_hdfs_cmd(del_cmd, retry_times=0) + + if returncode: + _logger.error("HDFS path: {} delete files failure".format( + hdfs_path)) + return False + else: + _logger.info("HDFS path: {} delete files successfully".format( + hdfs_path)) + return True + + def rename(self, hdfs_src_path, hdfs_dst_path, overwrite=False): + """Move a file or folder. + + :param hdfs_src_path: Source path. + :param hdfs_dst_path: Destination path. If the path already exists and is + a directory, the source will be moved into it.
+ + """ + assert hdfs_src_path is not None + assert hdfs_dst_path is not None + + if not self.is_exist(hdfs_src_path): + _logger.info("HDFS path do not exist: {}".format(hdfs_src_path)) + if self.is_exist(hdfs_dst_path) and not overwrite: + _logger.error("HDFS path is exist: {} and overwrite=False".format( + hdfs_dst_path)) + + rename_command = ['-mv', hdfs_src_path, hdfs_dst_path] + returncode, output, errors = self.__run_hdfs_cmd( + rename_command, retry_times=1) + + if returncode: + _logger.error("HDFS rename path: {} to {} failed".format( + hdfs_src_path, hdfs_dst_path)) + return False + else: + _logger.info("HDFS rename path: {} to {} successfully".format( + hdfs_src_path, hdfs_dst_path)) + return True + + @staticmethod + def make_local_dirs(local_path): + try: + os.makedirs(local_path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + def makedirs(self, hdfs_path): + """Create a remote directory, recursively if necessary. + + :param hdfs_path: Remote path. Intermediate directories will be created + appropriately. + """ + _logger.info('Creating directories to %r.', hdfs_path) + assert hdfs_path is not None + + if self.is_exist(hdfs_path): + return + + mkdirs_commands = ['-mkdir', hdfs_path] + returncode, output, errors = self.__run_hdfs_cmd( + mkdirs_commands, retry_times=1) + + if returncode: + _logger.error("HDFS mkdir path: {} failed".format(hdfs_path)) + return False + else: + _logger.error("HDFS mkdir path: {} successfully".format(hdfs_path)) + return True + + def ls(self, hdfs_path): + assert hdfs_path is not None + + if not self.is_exist(hdfs_path): + return [] + + ls_commands = ['-ls', hdfs_path] + returncode, output, errors = self.__run_hdfs_cmd( + ls_commands, retry_times=1) + + if returncode: + _logger.error("HDFS list path: {} failed".format(hdfs_path)) + return [] + else: + _logger.info("HDFS list path: {} successfully".format(hdfs_path)) + + ret_lines = [] + regex = re.compile('\s+') + out_lines = output.strip().split("\n") + for line in out_lines: + re_line = regex.split(line) + if len(re_line) == 8: + ret_lines.append(re_line[7]) + return ret_lines + + def lsr(self, hdfs_path, only_file=True, sort=True): + def sort_by_time(v1, v2): + v1_time = datetime.strptime(v1[1], '%Y-%m-%d %H:%M') + v2_time = datetime.strptime(v2[1], '%Y-%m-%d %H:%M') + return v1_time > v2_time + + assert hdfs_path is not None + + if not self.is_exist(hdfs_path): + return [] + + ls_commands = ['-lsr', hdfs_path] + returncode, output, errors = self.__run_hdfs_cmd( + ls_commands, retry_times=1) + + if returncode: + _logger.error("HDFS list all files: {} failed".format(hdfs_path)) + return [] + else: + _logger.info("HDFS list all files: {} successfully".format( + hdfs_path)) + lines = [] + regex = re.compile('\s+') + out_lines = output.strip().split("\n") + for line in out_lines: + re_line = regex.split(line) + if len(re_line) == 8: + if only_file and re_line[0][0] == "d": + continue + else: + lines.append( + (re_line[7], re_line[5] + " " + re_line[6])) + if sort: + sorted(lines, cmp=sort_by_time) + ret_lines = [ret[0] for ret in lines] + return ret_lines + + +def multi_upload(client, + hdfs_path, + local_path, + multi_processes=5, + overwrite=False): + """ + :param overwrite: will overwrite hdfs file or not + :param multi_processes: the upload data process at the same time, default=5 + :param client: instance of HDFSClient + :param hdfs_path: path on hdfs + :param local_path: path on local + :return: + """ + + def __subprocess_upload(datas): + for data in datas: + re_path = 
+ + +def multi_upload(client, + hdfs_path, + local_path, + multi_processes=5, + overwrite=False): + """ + :param client: instance of HDFSClient + :param hdfs_path: path on hdfs + :param local_path: path on local + :param multi_processes: the number of processes that upload concurrently, default=5 + :param overwrite: whether to overwrite existing hdfs files + :return: + """ + + def __subprocess_upload(datas): + for data in datas: + re_path = os.path.relpath(os.path.dirname(data), local_path) + hdfs_re_path = os.path.join(hdfs_path, re_path) + client.upload(hdfs_re_path, data, overwrite, retry_times=5) + + def get_local_files(path): + rlist = [] + + if not os.path.isdir(path): + return rlist + + for dirname, folder, files in os.walk(path): + for i in files: + t = os.path.join(dirname, i) + rlist.append(t) + return rlist + + assert isinstance(client, HDFSClient) + + all_files = get_local_files(local_path) + if not all_files: + _logger.info("there is nothing to upload, exit") + return + _logger.info("Start {} processes to upload data".format( + multi_processes)) + procs = [] + for i in range(multi_processes): + process_datas = all_files[i::multi_processes] + p = multiprocessing.Process( + target=__subprocess_upload, args=(process_datas, )) + procs.append(p) + p.start() + + # complete the processes + for proc in procs: + proc.join() + + _logger.info("Finish {} processes to upload data".format( + multi_processes)) + + +def multi_download(client, + hdfs_path, + local_path, + trainer_id, + trainers, + multi_processes=5): + """ + multi_download + :param client: instance of HDFSClient + :param hdfs_path: path on hdfs + :param local_path: path on local + :param trainer_id: current trainer id + :param trainers: the total number of trainers + :param multi_processes: the number of processes that download concurrently, default=5 + :return: a list of the local paths that were downloaded + """ + + def __subprocess_download(datas): + for data in datas: + re_path = os.path.relpath(os.path.dirname(data), hdfs_path) + local_re_path = os.path.join(local_path, re_path) + client.download(data, local_re_path) + + assert isinstance(client, HDFSClient) + + client.make_local_dirs(local_path) + _logger.info("Make local dir {} successfully".format(local_path)) + + all_need_download = client.lsr(hdfs_path, sort=True) + need_download = all_need_download[trainer_id::trainers] + _logger.info("Get {} files from all {} files to be downloaded from {}".
+ format(len(need_download), len(all_need_download), hdfs_path)) + + _logger.info("Start {} processes to download data".format( + multi_processes)) + procs = [] + for i in range(multi_processes): + process_datas = need_download[i::multi_processes] + p = multiprocessing.Process( + target=__subprocess_download, args=(process_datas, )) + procs.append(p) + p.start() + + # complete the processes + for proc in procs: + proc.join() + + _logger.info("Finish {} processes to download data".format( + multi_processes)) + + local_downloads = [] + for data in need_download: + data_name = os.path.basename(data) + re_path = os.path.relpath(os.path.dirname(data), hdfs_path) + local_re_path = os.path.join(local_path, re_path, data_name) + local_downloads.append(local_re_path) + + return local_downloads + + +if __name__ == "__main__": + hadoop_home = "/home/client/hadoop-client/hadoop/" + + configs = { + "fs.default.name": "hdfs://xxx.hadoop.com:54310", + "hadoop.job.ugi": "hello,hello123" + } + + client = HDFSClient(hadoop_home, configs) + + client.ls("/user/com/train-25") + files = client.lsr("/user/com/train-25/models") + + downloads = multi_download( + client, + "/user/com/train-25/model", + "/home/xx/data1", + 1, + 5, + multi_processes=5) + + multi_upload(client, "/user/com/train-25/model", "/home/xx/data1")
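Both multi_upload and multi_download split their work with the same stride-slicing idiom, list[i::n]; multi_download additionally shards across trainers first and across processes second. A tiny worked example of that arithmetic:

files = ["f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9"]
trainer_id, trainers = 1, 5
mine = files[trainer_id::trainers]        # this trainer gets ['f1', 'f6']
procs = [mine[i::2] for i in range(2)]    # 2 processes get [['f1'], ['f6']]
print(mine, procs)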
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index e0cc09a4c76caee7496f935682ea0d1788bc4bcb..ccd9175b64d46d745c8be5f64d7ddc21a117c181 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -85,6 +85,7 @@ __all__ = [ 'row_conv', 'multiplex', 'layer_norm', + 'group_norm', 'softmax_with_cross_entropy', 'smooth_l1', 'one_hot', @@ -2547,6 +2548,84 @@ def layer_norm(input, return helper.append_activation(layer_norm_out) +@templatedoc() +def group_norm(input, + groups, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None, + data_layout='NCHW', + name=None): + """ + **Group Normalization Layer** + + Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ + + Args: + input(Variable): The input tensor variable. + groups(int): The number of groups into which the channels are divided. + epsilon(float): The small value added to the variance to prevent + division by zero. + param_attr(ParamAttr|None): The parameter attribute for the learnable + scale :math:`g`. If it is set to False, no scale will be added to the output units. + If it is set to None, the scale is initialized to one. Default: None. + bias_attr(ParamAttr|None): The parameter attribute for the learnable + bias :math:`b`. If it is set to False, no bias will be added to the output units. + If it is set to None, the bias is initialized to zero. Default: None. + act(str): Activation to be applied to the output of group normalization. + data_layout(string|NCHW): Only NCHW is supported. + name (str): The name of this layer. It is optional. + + Returns: + Variable: A tensor variable which is the result after applying group normalization on the input. + + Examples: + + >>> data = fluid.layers.data(name='data', shape=[8, 32, 32], + >>> dtype='float32') + >>> x = fluid.layers.group_norm(input=data, groups=4) + """ + helper = LayerHelper('group_norm', **locals()) + dtype = helper.input_dtype() + + # create input and parameters + inputs = {'X': input} + input_shape = input.shape + if data_layout != 'NCHW': + raise ValueError("unsupported data layout:" + data_layout) + param_shape = [input_shape[1]] + if param_attr: + scale = helper.create_parameter( + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + default_initializer=Constant(1.0)) + inputs['Scale'] = scale + if bias_attr: + bias = helper.create_parameter( + attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True) + inputs['Bias'] = bias + + # create output + mean_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) + variance_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) + group_norm_out = helper.create_tmp_variable(dtype) + + helper.append_op( + type="group_norm", + inputs=inputs, + outputs={ + "Y": group_norm_out, + "Mean": mean_out, + "Variance": variance_out, + }, + attrs={"epsilon": epsilon, + "groups": groups}) + + return helper.append_activation(group_norm_out) + + def conv2d_transpose(input, num_filters, output_size=None, diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py index f65b37903a35fa2bf9f2c2b2f169ce6fd4c478db..829154f1b23d6e49bf963762be6b6488c98ec94a 100644 --- a/python/paddle/fluid/metrics.py +++ b/python/paddle/fluid/metrics.py @@ -46,8 +46,8 @@ def _is_numpy_(var): def _is_number_(var): - return isinstance(var, int) or isinstance(var, float) or (isinstance( - var, np.ndarray) and var.shape == (1, )) + return isinstance(var, int) or isinstance(var, np.int64) or isinstance( + var, float) or (isinstance(var, np.ndarray) and var.shape == (1, )) def _is_number_or_matrix_(var): diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index 510d0304f04ff1e7dc4c9bf37169778f85ca6db8..4fa69191ad50f3953de658d2aeb52668cfd1fb63 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -23,7 +23,9 @@ if(NOT WITH_DISTRIBUTE) LIST(REMOVE_ITEM TEST_OPS test_dist_text_classification) endif(NOT WITH_DISTRIBUTE) -if (${CUDNN_MAJOR_VERSION} VERSION_LESS 7) +if (NOT ${WITH_GPU}) + LIST(REMOVE_ITEM TEST_OPS test_conv2d_fusion_op) +elseif(${CUDNN_MAJOR_VERSION} VERSION_LESS 7) LIST(REMOVE_ITEM TEST_OPS test_conv2d_fusion_op) endif() @@ -79,10 +81,12 @@ list(REMOVE_ITEM TEST_OPS test_dist_se_resnext) list(REMOVE_ITEM TEST_OPS test_dist_transformer) list(REMOVE_ITEM TEST_OPS test_parallel_executor_transformer) list(REMOVE_ITEM TEST_OPS test_image_classification_resnet) +list(REMOVE_ITEM TEST_OPS test_interpolate_op) foreach(TEST_OP ${TEST_OPS}) py_test_modules(${TEST_OP} MODULES ${TEST_OP}) endforeach(TEST_OP) py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR} SERIAL) +py_test_modules(test_interpolate_op MODULES test_interpolate_op SERIAL) if(WITH_DISTRIBUTE) py_test_modules(test_dist_train MODULES test_dist_train SERIAL) set_tests_properties(test_listen_and_serv_op PROPERTIES TIMEOUT 20) diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index c195a28e452fbe073a9afb5d650f538176f688fd..271b9c740fd99554e9a7aa8d476a52cf6385b1d9 100644 ---
a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -381,8 +381,8 @@ class OpTest(unittest.TestCase): outs.sort(key=len) checker(outs) - def __assert_is_close(self, numeric_grads, analytic_grads, names, - max_relative_error, msg_prefix): + def _assert_is_close(self, numeric_grads, analytic_grads, names, + max_relative_error, msg_prefix): for a, b, name in six.moves.zip(numeric_grads, analytic_grads, names): abs_a = np.abs(a) @@ -451,9 +451,9 @@ class OpTest(unittest.TestCase): analytic_grads = self._get_gradient(inputs_to_check, place, output_names, no_grad_set) - self.__assert_is_close(numeric_grads, analytic_grads, inputs_to_check, - max_relative_error, - "Gradient Check On %s" % str(place)) + self._assert_is_close(numeric_grads, analytic_grads, inputs_to_check, + max_relative_error, + "Gradient Check On %s" % str(place)) @staticmethod def _numpy_to_lod_tensor(np_value, lod, place): diff --git a/python/paddle/fluid/tests/unittests/test_group_norm_op.py b/python/paddle/fluid/tests/unittests/test_group_norm_op.py new file mode 100644 index 0000000000000000000000000000000000000000..0b6d039f050898793b69312f50f6709d66d080cd --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_group_norm_op.py @@ -0,0 +1,143 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function +import unittest +import numpy as np + +from operator import mul +import paddle.fluid.core as core +import paddle.fluid as fluid +from op_test import OpTest + +from testsuite import create_op + + +def group_norm_naive(x, scale, bias, epsilon, groups): + N, C, H, W = x.shape + G = groups + x = x.reshape((N * G, -1)) + mean = np.mean(x, axis=1, keepdims=True) + var = np.var(x, axis=1, keepdims=True) + output = (x - mean) / np.sqrt(var + epsilon) + output = output.reshape((N, C, H, W)) * scale.reshape( + (-1, 1, 1)) + bias.reshape((-1, 1, 1)) + return output, mean.reshape((N, G)), var.reshape((N, G)) + + +class TestGroupNormOp(OpTest): + def setUp(self): + self.op_type = "group_norm" + self.data_format = "NCHW" + self.dtype = np.float32 + self.shape = (2, 4, 3, 3) + self.attrs = {'epsilon': 1e-5, 'groups': 2} + self.compare_between_place = False + self.init_test_case() + + input = np.random.random(self.shape).astype(self.dtype) + scale = np.random.random([self.shape[1]]).astype(self.dtype) + bias = np.random.random([self.shape[1]]).astype(self.dtype) + output, mean, var = group_norm_naive( + input, scale, bias, self.attrs['epsilon'], self.attrs['groups']) + + self.inputs = { + 'X': OpTest.np_dtype_to_fluid_dtype(input), + 'Scale': OpTest.np_dtype_to_fluid_dtype(scale), + 'Bias': OpTest.np_dtype_to_fluid_dtype(bias) + } + self.outputs = {'Y': output, 'Mean': mean, 'Variance': var} + + def test_check_output(self): + atol = 1e-4 + place = core.CPUPlace() + self.check_output_with_place(place, atol=atol) + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + self.check_output_with_place(place, atol=atol) + + def do_compare_between_place(self): + if not core.is_compiled_with_cuda(): return + place = core.CPUPlace() + place2 = core.CUDAPlace(0) + self.scope = core.Scope() + op_inputs = self.inputs if hasattr(self, "inputs") else dict() + op_outputs = self.outputs if hasattr(self, "outputs") else dict() + op_attrs = self.attrs if hasattr(self, "attrs") else dict() + self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs, + op_attrs) + inputs_to_check = set(['X', 'Scale', 'Bias']) + output_names = 'Y' + cpu_grads = self._get_gradient(inputs_to_check, place, output_names, + None) + gpu_grads = self._get_gradient(inputs_to_check, place2, output_names, + None) + self._assert_is_close(cpu_grads, gpu_grads, inputs_to_check, 0.005, + "Gradient Check On %s" % str(place)) + + def test_check_grad(self): + if self.compare_between_place: + self.do_compare_between_place() + return + place = core.CPUPlace() + self.check_grad_with_place( + place, set(['X', 'Scale', 'Bias']), 'Y', max_relative_error=0.01) + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + self.check_grad_with_place( + place, + set(['X', 'Scale', 'Bias']), + 'Y', + max_relative_error=0.01) + + def init_test_case(self): + pass + + +class TestGroupNormOp1(TestGroupNormOp): + def init_test_case(self): + self.attrs['groups'] = 1 + + +class TestGroupNormOp2(TestGroupNormOp): + def init_test_case(self): + self.attrs['groups'] = 4 + + +class TestGroupNormOpBigEps1(TestGroupNormOp): + def init_test_case(self): + self.attrs['groups'] = 1 + self.attrs['epsilon'] = 0.5 + + +class TestGroupNormOpBigEps2(TestGroupNormOp): + def init_test_case(self): + self.attrs['groups'] = 4 + self.attrs['epsilon'] = 0.5 + + +class TestGroupNormOpBigEps3(TestGroupNormOp): + def init_test_case(self): + self.attrs['epsilon'] = 0.5 + + +class TestGroupNormOpLargeData(TestGroupNormOp): + def 
init_test_case(self): + self.shape = (2, 32, 64, 64) + self.attrs['groups'] = 8 + self.compare_between_place = True + + +if __name__ == '__main__': + unittest.main() diff --git a/tools/manylinux1/Dockerfile.x64 b/tools/manylinux1/Dockerfile.x64 index 0d59e4c110ff8502acb4dbcda15f855f7652a946..e91216a5b89c585dd8ccade472e21e6524de9eb9 100644 --- a/tools/manylinux1/Dockerfile.x64 +++ b/tools/manylinux1/Dockerfile.x64 @@ -36,17 +36,21 @@ RUN cd /opt && wget -q --no-check-certificate https://github.com/google/protobuf tar xzf protobuf-cpp-3.1.0.tar.gz && \ cd protobuf-3.1.0 && ./configure && make -j4 && make install && cd .. && rm -f protobuf-cpp-3.1.0.tar.gz -RUN wget -O /root/requirements.txt https://raw.githubusercontent.com/PaddlePaddle/Paddle/develop/python/requirements.txt +RUN wget https://raw.githubusercontent.com/PaddlePaddle/Paddle/develop/python/requirements.txt -O /root/requirements.txt RUN LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27mu/bin/pip install -r /root/requirements.txt && \ LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27m/bin/pip install -r /root/requirements.txt && \ LD_LIBRARY_PATH=/opt/_internal/cpython-3.5.1/lib/:${LD_LIBRARY_PATH} /opt/_internal/cpython-3.5.1/bin/pip3 install -r /root/requirements.txt && \ + LD_LIBRARY_PATH=/opt/_internal/cpython-3.6.0/lib/:${LD_LIBRARY_PATH} /opt/_internal/cpython-3.6.0/bin/pip3 install -r /root/requirements.txt && \ + LD_LIBRARY_PATH=/opt/_internal/cpython-3.7.0/lib/:${LD_LIBRARY_PATH} /opt/_internal/cpython-3.7.0/bin/pip3 install -r /root/requirements.txt && \ go get github.com/Masterminds/glide && \ rm -rf /root/requirements.txt RUN LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27mu/bin/pip install pre-commit 'ipython==5.3.0' opencv-python && \ LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27m/bin/pip install pre-commit 'ipython==5.3.0' opencv-python && \ - LD_LIBRARY_PATH=/opt/_internal/cpython-3.5.1/lib/:${LD_LIBRARY_PATH} /opt/_internal/cpython-3.5.1/bin/pip3 install pre-commit 'ipython==5.3.0' opencv-python + LD_LIBRARY_PATH=/opt/_internal/cpython-3.5.1/lib/:${LD_LIBRARY_PATH} /opt/_internal/cpython-3.5.1/bin/pip3 install pre-commit 'ipython==5.3.0' opencv-python && \ + LD_LIBRARY_PATH=/opt/_internal/cpython-3.6.0/lib/:${LD_LIBRARY_PATH} /opt/_internal/cpython-3.6.0/bin/pip3 install pre-commit 'ipython==5.3.0' opencv-python && \ + LD_LIBRARY_PATH=/opt/_internal/cpython-3.7.0/lib/:${LD_LIBRARY_PATH} /opt/_internal/cpython-3.7.0/bin/pip3 install pre-commit 'ipython==5.3.0' opencv-python RUN wget -O /opt/swig-2.0.12.tar.gz https://cytranet.dl.sourceforge.net/project/swig/swig/swig-2.0.12/swig-2.0.12.tar.gz && \ cd /opt && tar xzf swig-2.0.12.tar.gz && cd /opt/swig-2.0.12 && ./configure && make && make install && cd /opt && rm swig-2.0.12.tar.gz diff --git a/tools/manylinux1/build_scripts/build.sh b/tools/manylinux1/build_scripts/build.sh index eb4b477dcb538f7ba17cfc54057a97c9669a6916..6c551eceb4543bf33229b9e5b5124522f3ee134c 100644 --- a/tools/manylinux1/build_scripts/build.sh +++ b/tools/manylinux1/build_scripts/build.sh @@ -9,12 +9,12 @@ set -ex # remove others to expedite build and reduce docker image size. The original # manylinux docker image project builds many python versions. 
# NOTE We added back 3.5.1, since auditwheel requires python 3.3+ -CPYTHON_VERSIONS="2.7.11 3.5.1" +CPYTHON_VERSIONS="3.7.0 3.6.0 3.5.1 2.7.11" # openssl version to build, with expected sha256 hash of .tar.gz # archive -OPENSSL_ROOT=openssl-1.0.2l -OPENSSL_HASH=ce07195b659e75f4e1db43552860070061f156a98bb37b672b101ba6e3ddf30c +OPENSSL_ROOT=openssl-1.1.0i +OPENSSL_HASH=ebbfc844a8c8cc0ea5dc10b86c9ce97f401837f3fa08c17b2cdadc118253cf99 EPEL_RPM_HASH=e5ed9ecf22d0c4279e92075a64c757ad2b38049bcf5c16c4f2b75d5f6860dc0d DEVTOOLS_HASH=a8ebeb4bed624700f727179e6ef771dafe47651131a00a78b342251415646acc PATCHELF_HASH=d9afdff4baeacfbc64861454f368b7f2c15c44d245293f7587bbf726bfe722fb @@ -25,7 +25,7 @@ AUTOCONF_HASH=954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969 # Dependencies for compiling Python that we want to remove from # the final image after compiling Python -PYTHON_COMPILE_DEPS="zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel db4-devel libpcap-devel xz-devel" +PYTHON_COMPILE_DEPS="zlib-devel bzip2-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel db4-devel libpcap-devel xz-devel libffi-devel" # Libraries that are allowed as part of the manylinux1 profile MANYLINUX1_DEPS="glibc-devel libstdc++-devel glib2-devel libX11-devel libXext-devel libXrender-devel mesa-libGL-devel libICE-devel libSM-devel ncurses-devel freetype-devel libpng-devel" @@ -61,7 +61,7 @@ yum -y install bzip2 make git patch unzip bison yasm diffutils \ wget -q https://cmake.org/files/v3.5/cmake-3.5.2.tar.gz && tar xzf cmake-3.5.2.tar.gz && \ cd cmake-3.5.2 && ./bootstrap && \ -make -j4 && make install && cd .. && rm cmake-3.5.2.tar.gz +make -j8 && make install && cd .. && rm cmake-3.5.2.tar.gz # Install newest autoconf @@ -77,11 +77,13 @@ mkdir -p /opt/python build_cpythons $CPYTHON_VERSIONS PY35_BIN=/opt/python/cp35-cp35m/bin +PY36_BIN=/opt/python/cp36-cp36m/bin +PY37_BIN=/opt/python/cp37-cp37m/bin # NOTE Since our custom manylinux image builds pythons with shared # libpython, we need to add libpython's dir to LD_LIBRARY_PATH before running # python. ORIGINAL_LD_LIBRARY_PATH="${LD_LIBRARY_PATH}" -LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname ${PY35_BIN})/lib" +LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname ${PY35_BIN})/lib:$(dirname ${PY36_BIN})/lib:$(dirname ${PY37_BIN})/lib" # Our openssl doesn't know how to find the system CA trust store # (https://github.com/pypa/manylinux/issues/53) @@ -119,9 +121,8 @@ ln -s $PY35_BIN/auditwheel /usr/local/bin/auditwheel # final image yum -y erase wireless-tools gtk2 libX11 hicolor-icon-theme \ avahi freetype bitstream-vera-fonts \ - ${PYTHON_COMPILE_DEPS} > /dev/null 2>&1 -yum -y install ${MANYLINUX1_DEPS} -yum -y clean all > /dev/null 2>&1 + ${PYTHON_COMPILE_DEPS} > /dev/null 2>&1 || true +yum -y install ${MANYLINUX1_DEPS} && yum -y clean all > /dev/null 2>&1 || true yum list installed # we don't need libpython*.a, and they're many megabytes find /opt/_internal -name '*.a' -print0 | xargs -0 rm -f diff --git a/tools/manylinux1/build_scripts/build_utils.sh b/tools/manylinux1/build_scripts/build_utils.sh index 10422ae3bd00f4e0dd059af0384f8cc17e4b7855..d97745ad2dd80e9855f9d8b643cf1e9e836b467c 100755 --- a/tools/manylinux1/build_scripts/build_utils.sh +++ b/tools/manylinux1/build_scripts/build_utils.sh @@ -52,9 +52,17 @@ function do_cpython_build { # NOTE --enable-shared for generating libpython shared library needed for # linking of some of the nupic.core test executables. 
- CFLAGS="-Wformat" ./configure --prefix=${prefix} --enable-shared $unicode_flags > /dev/null - make -j2 > /dev/null - make install > /dev/null + if [ $(lex_pyver $py_ver) -ge $(lex_pyver 3.7) ]; then + # NOTE python 3.7 should be installed via make altinstall rather than + # make install, and we should specify the location of ssl + CFLAGS="-Wformat" ./configure --prefix=${prefix} --with-openssl=/usr/local/ssl --enable-shared $unicode_flags > /dev/null + make -j8 > /dev/null + make altinstall > /dev/null + else + CFLAGS="-Wformat" ./configure --prefix=${prefix} --enable-shared $unicode_flags > /dev/null + make -j8 > /dev/null + make install > /dev/null + fi popd echo "ZZZ looking for libpython" find / -name 'libpython*.so*' @@ -64,6 +72,9 @@ function do_cpython_build { if [ -e ${prefix}/bin/python3 ]; then ln -s python3 ${prefix}/bin/python fi + if [ -e ${prefix}/bin/python3.7 ]; then + ln -s python3.7 ${prefix}/bin/python + fi # NOTE Make libpython shared library visible to python calls below LD_LIBRARY_PATH="${prefix}/lib" ${prefix}/bin/python get-pip.py LD_LIBRARY_PATH="${prefix}/lib" ${prefix}/bin/pip install wheel