// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

#include "paddle/fluid/framework/ir/pass_builder.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/nccl_helper.h"
#endif

namespace paddle {
namespace framework {
namespace details {

struct BuildStrategy {
  // ParallelExecutor supports two ReduceStrategy modes, kAllReduce and
  // kReduce, on both CPU and GPU. With kAllReduce, every thread optimizes
  // all of its parameters independently. With kReduce, the parameter
  // optimization work is partitioned across threads.
  // For example, if a model has 100 parameters and runs with four threads,
  // kAllReduce makes every thread optimize all 100 parameters, while
  // kReduce makes every thread optimize only 25 of them.
  // Of particular note: with kReduce in CPU training, all parameters are
  // shared between threads, which saves memory.
  // FIXME(zcd): The results of the two modes (kAllReduce and kReduce) may
  // not be equal on GPU, because summing in a different order can give a
  // different result; for example, `a+b+c+d` may differ from `c+a+b+d` in
  // floating point.
  // On GPU, both kAllReduce and kReduce are implemented with NCCL, so their
  // results may not be equal.
  // On CPU, if you want to fix the summation order so that kAllReduce and
  // kReduce produce the same result, set `FLAGS_cpu_deterministic=true` in
  // the environment.
  enum class ReduceStrategy { kAllReduce = 0, kReduce = 1 };

  enum class GradientScaleStrategy {
    kCoeffNumDevice = 0,
    kOne = 1,
    kCustomized = 2,
  };

  ReduceStrategy reduce_{ReduceStrategy::kAllReduce};
  GradientScaleStrategy gradient_scale_{GradientScaleStrategy::kCoeffNumDevice};

  std::string debug_graphviz_path_{""};

  bool fuse_elewise_add_act_ops_{false};

  bool enable_data_balance_{false};
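
  // A minimal configuration sketch (illustrative only; the consumer code and
  // the particular strategy choices below are assumptions, not requirements
  // of this header):
  //
  //   BuildStrategy strategy;
  //   strategy.reduce_ = BuildStrategy::ReduceStrategy::kReduce;
  //   strategy.fuse_elewise_add_act_ops_ = true;
  //   strategy.debug_graphviz_path_ = "./graph_viz";
  //
  // A consumer such as ParallelExecutor is then expected to use the filled-in
  // strategy when it builds its ir::Graph via Apply() below.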

  // The PassBuilder assembles passes based on the configs defined above.
  // For example, if fuse_elewise_add_act_ops_ is true, the corresponding
  // fuse pass will be added.
  // The PassBuilder also allows more customized insertion and removal of
  // passes from the Python side.
  // A new PassBuilder is created each time, and the passes are owned by
  // that PassBuilder.
  std::shared_ptr<ir::PassBuilder> CreatePassesFromStrategy() const;

  // Apply the passes built by pass_builder_. The passes will be applied to
  // the Program and output an ir::Graph.
  std::unique_ptr<ir::Graph> Apply(
      const ProgramDesc &main_program,
      const std::vector<platform::Place> &places,
      const std::string &loss_var_name,
      const std::unordered_set<std::string> &param_names,
      const std::vector<Scope *> &local_scopes,
#ifdef PADDLE_WITH_CUDA
      const bool use_cuda, platform::NCCLContextMap *nccl_ctxs) const;
#else
      const bool use_cuda) const;
#endif

 private:
  mutable std::shared_ptr<ir::PassBuilder> pass_builder_;
};

}  // namespace details
}  // namespace framework
}  // namespace paddle
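
// A rough calling sketch for Apply() (illustrative only: the main_program,
// places, loss_var_name, param_names, local_scopes, and nccl_ctxs variables
// are assumed caller-side state; the CUDA / non-CUDA split mirrors the
// declaration above):
//
//   using paddle::framework::details::BuildStrategy;
//   BuildStrategy strategy;
//   auto pass_builder = strategy.CreatePassesFromStrategy();
//   // (optionally insert/remove passes via pass_builder here)
// #ifdef PADDLE_WITH_CUDA
//   auto graph = strategy.Apply(main_program, places, loss_var_name,
//                               param_names, local_scopes,
//                               /*use_cuda=*/true, nccl_ctxs);
// #else
//   auto graph = strategy.Apply(main_program, places, loss_var_name,
//                               param_names, local_scopes,
//                               /*use_cuda=*/false);
// #endif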