diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index df3497de209e3b6ede6986e1ac5f92c4427ca9bd..68c6c8fd67db9f4428d612e86305fa0ba5f98a50 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -293,6 +293,7 @@ paddle.fluid.layers.sigmoid (ArgSpec(args=['x', 'name'], varargs=None, keywords= paddle.fluid.layers.logsigmoid (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '81ccb7acafd06c7728e11581f5d342e3')) paddle.fluid.layers.exp (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e6b3e769413d96aab4176f96db25984b')) paddle.fluid.layers.tanh (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e9d586a0b5bd05f67ee78048f9d503b6')) +paddle.fluid.layers.atan (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '3a46e0b5f9ce82348406478e610f14c9')) paddle.fluid.layers.tanh_shrink (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '1e521554b9fdda9061ec6d306f0709b7')) paddle.fluid.layers.softshrink (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '9eef31597bbafa2bd49691e072296e13')) paddle.fluid.layers.sqrt (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '072a8541e0f632366bba10f67cb0db27')) @@ -300,6 +301,8 @@ paddle.fluid.layers.abs (ArgSpec(args=['x', 'name'], varargs=None, keywords=None paddle.fluid.layers.ceil (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c75d67dc5fe28f68e4cfffead4f698ad')) paddle.fluid.layers.floor (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '647b16c5da5ef909649ae02abb434973')) paddle.fluid.layers.cos (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '485f2686bcc2fe37a4bd893769c8a3e2')) +paddle.fluid.layers.acos (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '920a47734482276c069ba24c61c26b25')) +paddle.fluid.layers.asin (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'cf4ee2c9b9d7293556f8c5173dfb5d2c')) paddle.fluid.layers.sin (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '01f1766aa76eff1df30147505b59f7c4')) paddle.fluid.layers.round (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b47f5da13913d3e56bdb1e612a73f3f2')) paddle.fluid.layers.reciprocal (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'cc6ac2f14f03c52aaa83a59bf83b8d26')) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index f3869ceb6d355914107052ae046559195cc969cb..99192292b0be992d5ff0ecebba6294b9ba27e958 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -34,11 +34,11 @@ limitations under the License. 
*/ #ifdef PADDLE_WITH_NGRAPH #include "paddle/fluid/operators/ngraph/ngraph_engine.h" +DEFINE_bool(use_ngraph, false, "Use NGRAPH to run"); #endif DECLARE_bool(benchmark); DEFINE_bool(use_mkldnn, false, "Use MKLDNN to run"); -DEFINE_bool(use_ngraph, false, "Use NGRAPH to run"); namespace paddle { namespace framework { @@ -194,9 +194,6 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, bool force_disable_gc) { platform::RecordBlock b(block_id); if (FLAGS_use_mkldnn) EnableMKLDNN(pdesc); -#ifdef PADDLE_WITH_NGRAPH - if (FLAGS_use_ngraph) operators::NgraphEngine::EnableNgraph(pdesc); -#endif auto ctx = Prepare(pdesc, block_id, skip_ref_cnt_vars, force_disable_gc); RunPreparedContext(ctx.get(), scope, create_local_scope, create_vars); } @@ -372,6 +369,12 @@ std::unique_ptr<ExecutorPrepareContext> Executor::Prepare( for (auto& op_desc : block.AllOps()) { ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc)); } +#ifdef PADDLE_WITH_NGRAPH + if (FLAGS_use_ngraph) { + paddle::operators::NgraphEngine::FuseNgraphOps( + ctx->prog_.Block(ctx->block_id_), &ctx->ops_); + } +#endif return ctx; } diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 980c5d8582c37d0949af6c049de921db2e99a229..a9a53b0d74f3b3ff8c4e1e4973a44fb06071f6ec 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -934,8 +934,10 @@ void OperatorWithKernel::RunImpl(const Scope& scope, dev_ctx = pool.Get(expected_kernel_key.place_); } - RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, *runtime_ctx_); - this->InferShape(&infer_shape_ctx); + if (!HasAttr(kAllKernelsMustComputeRuntimeShape)) { + RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, *runtime_ctx_); + this->InferShape(&infer_shape_ctx); + } // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext // not Scope. Imperative mode only pass inputs and get outputs. kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx, diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 29b9c45ccf3dcbffc0ffb23c4e91d9f7c64dc025..323aa5a7f58abd984ae2d420f4885d2b8b1c4e16 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -70,6 +70,15 @@ constexpr char kNewGradSuffix[] = "@NEWGRAD@"; /// execution to save the elapsed time. constexpr char kEnableRuntimeContext[] = "@ENABLE_RUNTIME_CONTEXT@"; +/// If an Op has this attribute, all its kernels should calculate output +/// variable's shape in the corresponding Compute() function. And +/// OperatorWithKernel::RunImpl() would skip calling this Op's InferShape() +/// function at runtime for speedup. +/// TODO(luotao): Note that this temporary attribute would be deleted after all +/// ops contain it.
+constexpr char kAllKernelsMustComputeRuntimeShape[] = + "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@"; + // define some kernel priority /* Define multiple kernel type fallback order*/ extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority; diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 3e1d61813ca83ebdf9435036117e79abe501b24b..acc71396b441149988f654843482a9e292977de3 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -181,13 +181,14 @@ std::vector<Scope *> &ParallelExecutor::GetLocalScopes() { return member_->local_scopes_; } -ParallelExecutor::ParallelExecutor( - const std::vector<platform::Place> &places, - const std::unordered_set<std::string> &bcast_vars, - const std::string &loss_var_name, Scope *scope, - const std::vector<Scope *> &local_scopes, - const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy, - ir::Graph *graph) +ParallelExecutor::ParallelExecutor(const std::vector<platform::Place> &places, + const std::vector<std::string> &bcast_vars, + const std::string &loss_var_name, + Scope *scope, + const std::vector<Scope *> &local_scopes, + const ExecutionStrategy &exec_strategy, + const BuildStrategy &build_strategy, + ir::Graph *graph) : member_(new ParallelExecutorPrivate(places)) { member_->global_scope_ = scope; member_->use_cuda_ = exec_strategy.use_cuda_; @@ -254,9 +255,23 @@ ParallelExecutor::ParallelExecutor( PADDLE_THROW("Not compiled with CUDA"); #endif } - if (member_->local_scopes_.size() != 1 && local_scopes.empty()) { - BCastParamsToDevices(bcast_vars); + // broadcast parameters from the 0th device to others: + auto need_broadcast = [&]() -> bool { + if (build_strategy.num_trainers_ > 1) { + // 1. num_trainers would be greater than 1 for nccl distributed training. + return true; + } else if (member_->local_scopes_.size() != 1 && local_scopes.empty()) { + // 2. Only one trainer process, but ParallelExecutor holds multiple + // devices. + return true; + } + return false; + }; + + if (need_broadcast()) { + BCastParamsToDevices(bcast_vars, build_strategy.trainer_id_); } + // Startup Program has been run. All local scopes have correct parameters. // Step 2. Convert main_program to SSA form and dependency graph. Also, insert @@ -338,7 +353,7 @@ ParallelExecutor::ParallelExecutor( } void ParallelExecutor::BCastParamsToDevices( - const std::unordered_set<std::string> &vars) const { + const std::vector<std::string> &vars, int trainer_id) const { // the initializing bcast, all vars would be bcast from device(0). for (auto &var : vars) { framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var); @@ -362,7 +377,7 @@ void ParallelExecutor::BCastParamsToDevices( auto place = member_->places_[i]; void *buffer; - if (i == 0) { + if (i == 0 && trainer_id == 0) { buffer = const_cast<void *>(main_tensor.data<void>()); } else { auto local_scope = member_->local_scopes_[i]; diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h index ddf60b39466e72822142e1dad2cfe9a97b6cf6f2..d4658b9623fe8c23b6a8b2903e3c48d794ba1652 100644 --- a/paddle/fluid/framework/parallel_executor.h +++ b/paddle/fluid/framework/parallel_executor.h @@ -14,9 +14,11 @@ limitations under the License.
*/ #pragma once +#include #include #include #include +#include #include #include "paddle/fluid/framework/details/build_strategy.h" @@ -45,7 +47,7 @@ class ParallelExecutor { public: explicit ParallelExecutor(const std::vector<platform::Place> &places, - const std::unordered_set<std::string> &bcast_vars, + const std::vector<std::string> &bcast_vars, const std::string &loss_var_name, Scope *scope, const std::vector<Scope *> &local_scopes, const ExecutionStrategy &exec_strategy, @@ -70,7 +72,10 @@ class ParallelExecutor { const std::string &fetched_var_name); private: - void BCastParamsToDevices(const std::unordered_set<std::string> &vars) const; + // broadcast the parameters from the 0th device. + // trainer_id is the trainer index in nccl distributed training. + void BCastParamsToDevices(const std::vector<std::string> &vars, + int trainer_id = 0) const; bool EnableParallelGraphExecution(const ir::Graph &graph, const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy) const; diff --git a/paddle/fluid/memory/allocation/CMakeLists.txt b/paddle/fluid/memory/allocation/CMakeLists.txt index 4b7b9064dcde9b5209264257d51bbd976ba8eb85..7c44e18f8f39cfcdf749441ba7530e5227c44b5f 100644 --- a/paddle/fluid/memory/allocation/CMakeLists.txt +++ b/paddle/fluid/memory/allocation/CMakeLists.txt @@ -3,7 +3,7 @@ cc_library(cpu_allocator SRCS cpu_allocator.cc DEPS allocator) cc_library(best_fit_allocator SRCS best_fit_allocator.cc DEPS allocator) cc_library(locked_allocator SRCS locked_allocator.cc DEPS allocator) cc_library(buffered_allocator SRCS buffered_allocator.cc DEPS allocator) -cc_library(legacy_allocator SRCS legacy_allocator.cc DEPS allocator buddy_allocator) +cc_library(legacy_allocator SRCS legacy_allocator.cc DEPS allocator buddy_allocator profiler) cc_test(buffered_allocator_test SRCS buffered_allocator_test.cc DEPS best_fit_allocator locked_allocator buffered_allocator cpu_allocator) if (WITH_GPU) diff --git a/paddle/fluid/memory/allocation/legacy_allocator.cc b/paddle/fluid/memory/allocation/legacy_allocator.cc index a97d54a1917df69c62af02895510435a59225186..c233bf4edf5462dc48f6c3f4f22a517a03585b45 100644 --- a/paddle/fluid/memory/allocation/legacy_allocator.cc +++ b/paddle/fluid/memory/allocation/legacy_allocator.cc @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License.
-#include "paddle/fluid/memory/allocation/legacy_allocator.h" - #include #include #include @@ -24,9 +22,11 @@ #endif #include "glog/logging.h" +#include "paddle/fluid/memory/allocation/legacy_allocator.h" #include "paddle/fluid/memory/detail/buddy_allocator.h" #include "paddle/fluid/memory/detail/system_allocator.h" #include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/string/printf.h" #include "paddle/fluid/string/split.h" @@ -329,18 +329,22 @@ size_t Usage::operator()(const platform::CUDAPinnedPlace &cuda_pinned) const { } // namespace legacy namespace allocation { - LegacyMemMonitor GPUMemMonitor; Allocation *LegacyAllocator::AllocateImpl(size_t size, Allocator::Attr attr) { void *ptr = boost::apply_visitor(legacy::AllocVisitor(size), place_); - return new Allocation(ptr, size, place_); + auto *tmp_alloc = new Allocation(ptr, size, place_); + platform::MemEvenRecorder::Instance().PushMemRecord( + static_cast(tmp_alloc), place_, size); + return tmp_alloc; } void LegacyAllocator::Free(Allocation *allocation) { boost::apply_visitor( legacy::FreeVisitor(allocation->ptr(), allocation->size()), allocation->place()); + platform::MemEvenRecorder::Instance().PopMemRecord( + static_cast(allocation), place_); delete allocation; } diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index 2feb8e4c4787440fd086c597fa2a7f97204e34ac..f79960317aa1bac7ae9f8d80e4886dde8fe8ebcb 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -13,7 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/activation_op.h" +#include #include +#include #include "paddle/fluid/operators/mkldnn/mkldnn_activation_op.h" #include "paddle/fluid/platform/port.h" #ifdef PADDLE_WITH_CUDA @@ -269,6 +271,48 @@ $$out = \\frac{x}{1 + \|x\|}$$ )DOC"; +class AcosOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "Input of acos operator"); + AddOutput("Out", "Output of acos operator"); + AddComment(R"DOC( +Arccosine Activation Operator. + +$$out = \cos^{-1}(x)$$ + +)DOC"); + } +}; + +class AsinOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "Input of asin operator"); + AddOutput("Out", "Output of asin operator"); + AddComment(R"DOC( +Arcsine Activation Operator. + +$$out = \sin^{-1}(x)$$ + +)DOC"); + } +}; + +class AtanOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "Input of atan operator"); + AddOutput("Out", "Output of atan operator"); + AddComment(R"DOC( +Arctanh Activation Operator. 
+ +$$out = \tan^{-1}(x)$$ + +)DOC"); + } +}; + class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { @@ -543,7 +587,10 @@ namespace ops = paddle::operators; __macro(SoftShrink, softshrink); \ __macro(Abs, abs); \ __macro(Cos, cos); \ + __macro(Acos, acos); \ __macro(Sin, sin); \ + __macro(Asin, asin); \ + __macro(Atan, atan); \ __macro(Round, round); \ __macro(Log, log); \ __macro(Square, square); \ diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index 1f5ae7fb5cd2e1c14190602d2c35e6c3755cfd70..ff7e623f6f383ed2a8b8a40b3186d9c439ff1d86 100644 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -39,9 +39,8 @@ namespace operators { Please refer to the layer_helper.py and get the details. */ static std::unordered_set<std::string> InplaceOpSet = { - "sigmoid", "exp", "relu", "tanh", "sqrt", "ceil", - "floor", "reciprocal", "relu6", "soft_relu", "hard_sigmoid", -}; + "sigmoid", "exp", "relu", "tanh", "sqrt", "ceil", + "floor", "reciprocal", "relu6", "soft_relu", "hard_sigmoid"}; static bool IsInplace(const std::string& op) { bool inplace = InplaceOpSet.count(op); @@ -553,6 +552,101 @@ struct SinFunctor : public BaseActivationFunctor<T> { } }; +template <typename T> +struct Acos { + HOSTDEVICE T operator()(const T& val) const { return acos(val); } +}; + +template <> +struct Acos<platform::float16> { + HOSTDEVICE platform::float16 operator()(const platform::float16& val) const { + return platform::float16(acos(static_cast<float>(val))); + } +}; + +// Acos(x) = acos(x) +template <typename T> +struct AcosFunctor : public BaseActivationFunctor<T> { + template <typename Device, typename X, typename Out> + void operator()(Device d, X x, Out out) const { + out.device(d) = x.unaryExpr(Acos<T>()); + } +}; + +// acos'(x) = -1/sqrt(1-x^2) +template <typename T> +struct AcosGradFunctor : public BaseActivationFunctor<T> { + template <typename Device, typename X, typename Out, typename dOut, typename dX> + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = + -dout * static_cast<T>(1) / (static_cast<T>(1) - x.square()).sqrt(); + } +}; + +template <typename T> +struct Asin { + HOSTDEVICE T operator()(const T& val) const { return asin(val); } +}; + +template <> +struct Asin<platform::float16> { + HOSTDEVICE platform::float16 operator()(const platform::float16& val) const { + return platform::float16(asin(static_cast<float>(val))); + } +}; + +// Asin(x) = asin(x) +template <typename T> +struct AsinFunctor : public BaseActivationFunctor<T> { + template <typename Device, typename X, typename Out> + void operator()(Device d, X x, Out out) const { + out.device(d) = x.unaryExpr(Asin<T>()); + } +}; + +// asin'(x) = 1/sqrt(1-x^2) +template <typename T> +struct AsinGradFunctor : public BaseActivationFunctor<T> { + template <typename Device, typename X, typename Out, typename dOut, typename dX> + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = + dout * static_cast<T>(1) / (static_cast<T>(1) - x.square()).sqrt(); + } +}; + +template <typename T> +struct Atan { + HOSTDEVICE T operator()(const T& val) const { return atan(val); } +}; + +template <> +struct Atan<platform::float16> { + HOSTDEVICE platform::float16 operator()(const platform::float16& val) const { + return platform::float16(atan(static_cast<float>(val))); + } +}; + +// Atan(x) = atan(x) +template <typename T> +struct AtanFunctor : public BaseActivationFunctor<T> { + template <typename Device, typename X, typename Out> + void operator()(Device d, X x, Out out) const { + out.device(d) = x.unaryExpr(Atan<T>()); + } +}; + +// atan'(x) = 1 / (1 + x^2) +template <typename T> +struct AtanGradFunctor : public BaseActivationFunctor<T> { + template <typename Device, typename X, typename Out, typename dOut, typename dX> + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * static_cast<T>(1) / (static_cast<T>(1) + x.square()); + } +}; + // round(x) = [x] template <typename T> struct RoundFunctor : public BaseActivationFunctor<T> { @@ -1001,13 +1095,16
@@ struct SwishGradFunctor : public BaseActivationFunctor { __macro(relu, ReluFunctor, ReluGradFunctor); \ __macro(gelu, GeluFunctor, GeluGradFunctor); \ __macro(tanh, TanhFunctor, TanhGradFunctor); \ + __macro(atan, AtanFunctor, AtanGradFunctor); \ __macro(softshrink, SoftShrinkFunctor, SoftShrinkGradFunctor); \ __macro(sqrt, SqrtFunctor, SqrtGradFunctor); \ __macro(abs, AbsFunctor, AbsGradFunctor); \ __macro(ceil, CeilFunctor, ZeroGradFunctor); \ __macro(floor, FloorFunctor, ZeroGradFunctor); \ __macro(cos, CosFunctor, CosGradFunctor); \ + __macro(acos, AcosFunctor, AcosGradFunctor); \ __macro(sin, SinFunctor, SinGradFunctor); \ + __macro(asin, AsinFunctor, AsinGradFunctor); \ __macro(round, RoundFunctor, ZeroGradFunctor); \ __macro(reciprocal, ReciprocalFunctor, ReciprocalGradFunctor); \ __macro(log, LogFunctor, LogGradFunctor); \ diff --git a/paddle/fluid/operators/detection/box_coder_op.h b/paddle/fluid/operators/detection/box_coder_op.h index 6d406f8196f9964c85bb94541fa7a7a23857539b..d4c7e8cf7723bf83d3cd8bf36b9ae6c5f1c35b10 100644 --- a/paddle/fluid/operators/detection/box_coder_op.h +++ b/paddle/fluid/operators/detection/box_coder_op.h @@ -20,7 +20,7 @@ namespace operators { enum class BoxCodeType { kEncodeCenterSize = 0, kDecodeCenterSize = 1 }; -inline BoxCodeType GetBoxCodeType(const std::string& type) { +inline BoxCodeType GetBoxCodeType(const std::string &type) { if (type == "encode_center_size") { return BoxCodeType::kEncodeCenterSize; } else if (type == "decode_center_size") { @@ -32,24 +32,23 @@ inline BoxCodeType GetBoxCodeType(const std::string& type) { template class BoxCoderKernel : public framework::OpKernel { public: - void EncodeCenterSize(const framework::Tensor* target_box, - const framework::Tensor* prior_box, - const framework::Tensor* prior_box_var, + void EncodeCenterSize(const framework::Tensor *target_box, + const framework::Tensor *prior_box, + const framework::Tensor *prior_box_var, const bool normalized, - const std::vector variance, T* output) const { + const std::vector variance, T *output) const { int64_t row = target_box->dims()[0]; int64_t col = prior_box->dims()[0]; int64_t len = prior_box->dims()[1]; - auto* target_box_data = target_box->data(); - auto* prior_box_data = prior_box->data(); - const T* prior_box_var_data = nullptr; - if (prior_box_var) prior_box_var_data = prior_box_var->data(); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { + auto *target_box_data = target_box->data(); + auto *prior_box_data = prior_box->data(); + size_t offset = i * col * len + j * len; T prior_box_width = prior_box_data[j * len + 2] - prior_box_data[j * len] + (normalized == false); T prior_box_height = prior_box_data[j * len + 3] - @@ -69,7 +68,6 @@ class BoxCoderKernel : public framework::OpKernel { target_box_data[i * len + 1] + (normalized == false); - size_t offset = i * col * len + j * len; output[offset] = (target_box_center_x - prior_box_center_x) / prior_box_width; output[offset + 1] = @@ -78,44 +76,61 @@ class BoxCoderKernel : public framework::OpKernel { std::log(std::fabs(target_box_width / prior_box_width)); output[offset + 3] = std::log(std::fabs(target_box_height / prior_box_height)); - if (prior_box_var) { - int prior_var_offset = j * len; - output[offset] /= prior_box_var_data[prior_var_offset]; - output[offset + 1] /= prior_box_var_data[prior_var_offset + 1]; - output[offset + 2] /= prior_box_var_data[prior_var_offset + 2]; - output[offset + 3] /= 
prior_box_var_data[prior_var_offset + 3]; - } else if (!(variance.empty())) { + } + } + + if (prior_box_var) { + const T *prior_box_var_data = prior_box_var->data(); +#ifdef PADDLE_WITH_MKLML +#pragma omp parallel for collapse(3) +#endif + for (int64_t i = 0; i < row; ++i) { + for (int64_t j = 0; j < col; ++j) { for (int k = 0; k < 4; ++k) { + size_t offset = i * col * len + j * len; + int prior_var_offset = j * len; + output[offset + k] /= prior_box_var_data[prior_var_offset + k]; + } + } + } + } else if (!(variance.empty())) { +#ifdef PADDLE_WITH_MKLML +#pragma omp parallel for collapse(3) +#endif + for (int64_t i = 0; i < row; ++i) { + for (int64_t j = 0; j < col; ++j) { + for (int k = 0; k < 4; ++k) { + size_t offset = i * col * len + j * len; output[offset + k] /= static_cast(variance[k]); } } } } } + template - void DecodeCenterSize(const framework::Tensor* target_box, - const framework::Tensor* prior_box, - const framework::Tensor* prior_box_var, + void DecodeCenterSize(const framework::Tensor *target_box, + const framework::Tensor *prior_box, + const framework::Tensor *prior_box_var, const bool normalized, std::vector variance, - T* output) const { + T *output) const { int64_t row = target_box->dims()[0]; int64_t col = target_box->dims()[1]; int64_t len = target_box->dims()[2]; - auto* target_box_data = target_box->data(); - auto* prior_box_data = prior_box->data(); - const T* prior_box_var_data = nullptr; - if (var_size == 2) prior_box_var_data = prior_box_var->data(); - int prior_box_offset = 0; - T var_data[4] = {1., 1., 1., 1.}; - T* var_ptr = var_data; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { + auto *target_box_data = target_box->data(); + auto *prior_box_data = prior_box->data(); + + T var_data[4] = {1., 1., 1., 1.}; + T *var_ptr = var_data; size_t offset = i * col * len + j * len; - prior_box_offset = axis == 0 ? j * len : i * len; + int prior_box_offset = axis == 0 ? j * len : i * len; + T prior_box_width = prior_box_data[prior_box_offset + 2] - prior_box_data[prior_box_offset] + (normalized == false); @@ -131,10 +146,10 @@ class BoxCoderKernel : public framework::OpKernel { T target_box_width = 0, target_box_height = 0; int prior_var_offset = axis == 0 ? 
j * len : i * len; if (var_size == 2) { - std::memcpy(var_ptr, prior_box_var_data + prior_var_offset, + std::memcpy(var_ptr, prior_box_var->data() + prior_var_offset, 4 * sizeof(T)); } else if (var_size == 1) { - var_ptr = reinterpret_cast(variance.data()); + var_ptr = reinterpret_cast(variance.data()); } T box_var_x = *var_ptr; T box_var_y = *(var_ptr + 1); @@ -162,11 +177,11 @@ class BoxCoderKernel : public framework::OpKernel { } } - void Compute(const framework::ExecutionContext& context) const override { - auto* prior_box = context.Input("PriorBox"); - auto* prior_box_var = context.Input("PriorBoxVar"); - auto* target_box = context.Input("TargetBox"); - auto* output_box = context.Output("OutputBox"); + void Compute(const framework::ExecutionContext &context) const override { + auto *prior_box = context.Input("PriorBox"); + auto *prior_box_var = context.Input("PriorBoxVar"); + auto *target_box = context.Input("TargetBox"); + auto *output_box = context.Output("OutputBox"); std::vector variance = context.Attr>("variance"); const int axis = context.Attr("axis"); if (target_box->lod().size()) { @@ -194,7 +209,7 @@ class BoxCoderKernel : public framework::OpKernel { output_box->mutable_data({row, col, len}, context.GetPlace()); - T* output = output_box->data(); + T *output = output_box->data(); if (code_type == BoxCodeType::kEncodeCenterSize) { EncodeCenterSize(target_box, prior_box, prior_box_var, normalized, variance, output); diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc index 80caf70b08e65932d6ccb90a5293d072b2b2bc72..a0026427e2514735711f7eba26fcf861cb498d5e 100644 --- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc +++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc @@ -23,9 +23,6 @@ class FusedEmbeddingSeqPoolOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - if (ctx->IsRuntime()) { - return; - } PADDLE_ENFORCE(ctx->HasInput("W"), "Input W of FusedEmbeddingSeqPoolOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Ids"), @@ -91,6 +88,8 @@ class FusedEmbeddingSeqPoolOpMaker : public framework::OpProtoAndCheckerMaker { "(boolean, default false) " "Sparse update.") .SetDefault(false); + AddAttr(framework::kAllKernelsMustComputeRuntimeShape, "") + .SetDefault(true); AddComment(R"DOC( FusedEmbeddingSeqPool Operator. 
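Reviewer note: the `kAllKernelsMustComputeRuntimeShape` hunks above shift shape inference out of `OperatorWithKernel::RunImpl()` and into each kernel. A minimal sketch of the pattern an opted-in op must follow; `MyOpKernel` is a hypothetical name, not part of this patch:

```cpp
// Sketch only: an op that opts out of per-step InferShape() must resize its
// outputs inside Compute() itself (compare FusedEmbeddingSeqPoolGradKernel's
// set_height() call below).
template <typename T>
class MyOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto *in = ctx.Input<framework::LoDTensor>("X");
    auto *out = ctx.Output<framework::LoDTensor>("Out");
    // RunImpl() skipped InferShape() because the op carries
    // kAllKernelsMustComputeRuntimeShape, so derive the output shape here.
    out->Resize(in->dims());
    out->mutable_data<T>(ctx.GetPlace());
    // ... actual kernel math on in/out ...
  }
};
```

The opt-in itself is the single boolean attribute registered in the OpMaker, as in `fused_embedding_seq_pool_op.cc` and `hash_op.cc` above: `AddAttr<bool>(framework::kAllKernelsMustComputeRuntimeShape, "").SetDefault(true);`.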
diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h index 5e2e336e7117cc4816a52405b7bc2689bc03dd46..4651c2b2ba81a404b64818fec81cef79634ff036 100644 --- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h +++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h @@ -121,6 +121,8 @@ class FusedEmbeddingSeqPoolGradKernel : public framework::OpKernel { auto *ids = context.Input("Ids"); auto *d_output = context.Input(framework::GradVarName("Out")); auto *d_table = context.Output(framework::GradVarName("W")); + // runtime shape + d_table->set_height(table_dim[0]); auto *ids_data = ids->data(); int64_t ids_num = ids->numel(); diff --git a/paddle/fluid/operators/hash_op.cc b/paddle/fluid/operators/hash_op.cc index 7a29f80ff1ce413519ea9cea6a35747bdced5885..f6395fb32feac175976cb96e1c0bee7347cb3ea8 100644 --- a/paddle/fluid/operators/hash_op.cc +++ b/paddle/fluid/operators/hash_op.cc @@ -26,9 +26,6 @@ class HashOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} void InferShape(framework::InferShapeContext *ctx) const override { - if (ctx->IsRuntime()) { - return; - } PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of HashOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -57,6 +54,8 @@ $$Out = scale * X$$ )DOC"); AddAttr("num_hash", "").SetDefault(1); AddAttr("mod_by", "").SetDefault(100000); + AddAttr(framework::kAllKernelsMustComputeRuntimeShape, "") + .SetDefault(true); } }; diff --git a/paddle/fluid/operators/ngraph/ngraph_engine.cc b/paddle/fluid/operators/ngraph/ngraph_engine.cc index 41037d9039bb53038af80eafa269ee9246dc9980..cd32200e925193b393f4531b87ed6b1e4291109d 100644 --- a/paddle/fluid/operators/ngraph/ngraph_engine.cc +++ b/paddle/fluid/operators/ngraph/ngraph_engine.cc @@ -29,7 +29,6 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/var_desc.h" #include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/operators/ngraph/ngraph_bridge.h" #include "paddle/fluid/operators/ngraph/ngraph_engine.h" @@ -42,44 +41,75 @@ static ngraph::Shape Ddim2Shape(const framework::DDim& dims) { for (int i = 0; i < dims.size(); ++i) { int k = dims[i]; k = k == 0 ? 
1 : k; - sp.push_back(k); + sp.emplace_back(k); } return sp; } +static framework::DDim Shape2Ddim(const ngraph::Shape& shape) { + std::vector dims; + for (size_t i = 0; i < shape.size(); ++i) { + int64_t k = shape[i]; + dims.emplace_back(k); + } + return framework::make_ddim(dims); +} + static std::map pd2ng_type_map = { {framework::proto::VarType::FP32, ngraph::element::f32}, {framework::proto::VarType::FP64, ngraph::element::f64}, {framework::proto::VarType::INT32, ngraph::element::i32}, {framework::proto::VarType::INT64, ngraph::element::i64}, - {framework::proto::VarType::BOOL, ngraph::element::boolean}, -}; - -std::unordered_map> - NgraphEngine::func_cache_ = {}; + {framework::proto::VarType::BOOL, ngraph::element::boolean}}; + +static std::map + ng2pd_type_map = { + {ngraph::element::f32, framework::proto::VarType::FP32}, + {ngraph::element::f64, framework::proto::VarType::FP64}, + {ngraph::element::i32, framework::proto::VarType::INT32}, + {ngraph::element::i64, framework::proto::VarType::INT64}, + {ngraph::element::boolean, framework::proto::VarType::BOOL}}; + +std::vector NgraphEngine::feed_vars = {}; +std::vector NgraphEngine::fetch_vars = {}; +framework::Variable* NgraphEngine::pre_var_ptr = nullptr; +const framework::BlockDesc* NgraphEngine::p_bdesc = nullptr; + +std::unordered_map NgraphEngine::engine_cache = {}; +std::unordered_map>> + NgraphEngine::t_in_cache_ = {}; std::shared_ptr NgraphEngine::backend_ = ngraph::runtime::Backend::create("CPU"); static std::vector> NgraphOpIntervals( - framework::BlockDesc* block) { + std::vector>* ops) { + NgraphEngine::feed_vars.clear(); + NgraphEngine::fetch_vars.clear(); std::vector> intervals; - auto ops = block->AllOps(); - int size = ops.size(); + + int size = ops->size(); int left = 0; - while (left < size && ops.at(left)->Type() != framework::kFeedOpType) { + while (left < size && ops->at(left)->Type() != framework::kFeedOpType) { ++left; } if (left == size) { return intervals; } - while (left < size && ops.at(left)->Type() == framework::kFeedOpType) { + + while (left < size && ops->at(left)->Type() == framework::kFeedOpType) { + for (auto& var_name_item : ops->at(left)->Outputs()) { + for (auto& var_name : var_name_item.second) { + NgraphEngine::feed_vars.emplace_back(var_name); + } + } ++left; } int right = left; - while (right < size && ops.at(right)->Type() != framework::kFetchOpType) { + while (right < size && ops->at(right)->Type() != framework::kFetchOpType) { ++right; } if (right == size) { @@ -87,85 +117,124 @@ static std::vector> NgraphOpIntervals( } if (left >= right) return intervals; + int index = right; + while (index < size && ops->at(index)->Type() == framework::kFetchOpType) { + for (auto& var_name_item : ops->at(index)->Inputs()) { + for (auto& var_name : var_name_item.second) { + NgraphEngine::fetch_vars.emplace_back(var_name); + } + } + ++index; + } + // (left, right - 1) represents indices between feed and fetch int pivot = left; while (pivot < right) { - auto op_type = ops.at(pivot)->Type(); + auto op_type = ops->at(pivot)->Type(); if (NgraphBridge::isRegister(op_type)) { ++pivot; } else { int start = pivot, end = start; while (pivot < right && - (!NgraphBridge::isRegister(ops.at(pivot)->Type()))) { + (!NgraphBridge::isRegister(ops->at(pivot)->Type()))) { ++pivot; ++end; } std::vector interval = {start, end}; - intervals.push_back(interval); + intervals.emplace_back(interval); } } // end while return intervals; } -static void SubstituteNgraphOp(framework::BlockDesc* block, - std::string block_str, - 
std::vector interval) { - framework::ProgramDesc program; - block->RemoveOp(interval.at(0), interval.at(1)); - auto* ng_op = block->InsertOp(interval.at(0)); - ng_op->SetType("ngraph_engine"); - ng_op->SetAttr("interval", interval); - ng_op->SetAttr("graph", block_str); +static void SubstituteNgraphOp( + std::vector>* ops, + std::string engine_key, std::string block_str, std::vector interval) { + framework::OpDesc ng_op_desc(nullptr); + ng_op_desc.SetType("ngraph_engine"); + ng_op_desc.SetAttr("interval", interval); + ng_op_desc.SetAttr("engine_key", engine_key); + ng_op_desc.SetAttr("graph", block_str); + + ops->erase(ops->begin() + interval[0], ops->begin() + interval[1]); + ops->insert(ops->begin() + interval[0], + framework::OpRegistry::CreateOp(ng_op_desc)); } -// TODO(baojun-nervana): Move EnableNgraph to compile time per PR #15089 -void NgraphEngine::EnableNgraph(const framework::ProgramDesc& program) { -#ifdef PADDLE_WITH_NGRAPH - VLOG(4) << "use_ngraph=True"; - for (size_t bid = 0; bid < program.Size(); ++bid) { - // TODO(baojun-nervana): Remove the const_cast - auto* block = - const_cast(program).MutableBlock(bid); - std::string block_str = block->Proto()->SerializeAsString(); - auto intervals = NgraphOpIntervals(block); - for (auto it = intervals.rbegin(); it != intervals.rend(); ++it) { - SubstituteNgraphOp(block, block_str, *it); - } +std::string SerializedBlock(const std::vector& op_descs) { + framework::proto::BlockDesc block_proto; + framework::BlockDesc block_desc(nullptr, &block_proto); + block_desc.Proto()->set_parent_idx(-1); + block_desc.Proto()->set_idx(0); + + for (auto* op_desc : op_descs) { + auto* op = block_desc.AppendOp(); + *op->Proto() = *op_desc->Proto(); + } + return block_desc.Proto()->SerializeAsString(); +} + +std::string GenerateEngineKey(const framework::BlockDesc& bdesc) { + framework::proto::BlockDesc block_proto; + framework::BlockDesc block_desc(nullptr, &block_proto); + block_desc.Proto()->set_parent_idx(-1); + block_desc.Proto()->set_idx(0); + + for (auto& op_desc : bdesc.AllOps()) { + auto* op = block_desc.AppendOp(); + *op->Proto() = *op_desc->Proto(); + } + auto engine_key = std::to_string( + std::hash()(block_desc.Proto()->SerializeAsString())); + return engine_key; +} + +std::string GenerateEngineKey(const std::vector& engine_inputs, + const std::vector& engine_outputs, + int size) { + std::string engine_hash_key = ""; + for (auto name : engine_inputs) { + engine_hash_key += name; + } + for (auto name : engine_outputs) { + engine_hash_key += name; + } + engine_hash_key += std::to_string(size); + auto engine_key = std::to_string(std::hash()(engine_hash_key)); + return engine_key; +} + +void NgraphEngine::FuseNgraphOps( + const framework::BlockDesc& block_desc, + std::vector>* ops) { + NgraphEngine::p_bdesc = &block_desc; + auto intervals = NgraphOpIntervals(ops); + std::string engine_key = + GenerateEngineKey(feed_vars, fetch_vars, ops->size()); + for (auto it = intervals.rbegin(); it != intervals.rend(); ++it) { + SubstituteNgraphOp(ops, engine_key, "", *it); } -#else - LOG(WARNING) - << "'NGRAPH' is not supported, Please re-compile with WITH_NGRAPH option"; -#endif } NgraphEngine::NgraphEngine(const framework::Scope& scope, const platform::Place& place, - const std::string& serialized_graph, - const std::vector& interval) + const framework::ExecutionContext& ctx) : scope_(scope), place_(place) { + std::string serialized_graph = ctx.Attr("graph"); + auto interval = ctx.Attr>("interval"); + std::string engine_key = ctx.Attr("engine_key"); + 
var_in_node_map_ = std::make_shared< std::unordered_map>>(); var_node_map_ = std::make_shared< std::unordered_map>>(); - func_cache_key_ = std::to_string(interval[0]) + std::to_string(interval[1]) + - serialized_graph; - - framework::proto::BlockDesc bdesc; - bdesc.ParseFromString(serialized_graph); - framework::BlockDesc block(nullptr, &bdesc); - - Prepare(block, interval); - - BuildNgIO(); - - GetNgFunction(); + GetNgFunction(engine_key, interval); } -void NgraphEngine::Prepare(const framework::BlockDesc& block, - const std::vector& interval) { - for (auto& var : block.AllVars()) { +void NgraphEngine::Prepare(const std::vector& interval) { + for (auto& var : p_bdesc->AllVars()) { if (!(var->GetType() == framework::proto::VarType::SELECTED_ROWS || var->GetType() == framework::proto::VarType::LOD_TENSOR || var->GetType() == framework::proto::VarType::LOD_TENSOR_ARRAY)) { @@ -192,108 +261,57 @@ void NgraphEngine::Prepare(const framework::BlockDesc& block, } } - auto ops_desc = block.AllOps(); - int idx = interval[0]; - while (idx < interval[1]) { - auto op_desc = ops_desc.at(idx); - auto op = framework::OpRegistry::CreateOp(*op_desc); - fused_ops_.push_back(std::move(op)); - ++idx; - } - - while (ops_desc.at(idx)->Type() != framework::kFetchOpType) { - auto op_desc = ops_desc.at(idx); - for (auto& var_name_item : op_desc->Inputs()) { - for (auto& var_name : var_name_item.second) { - post_op_inputs_.insert(var_name); - } - } - ++idx; - } - - while (idx < static_cast(ops_desc.size()) && - ops_desc.at(idx)->Type() == framework::kFetchOpType) { - std::string fetch_target_name = ops_desc.at(idx)->Input("X")[0]; - fetches_.insert(fetch_target_name); - ++idx; - } - - if (ops_desc.at(interval.at(0) - 1)->Type() == framework::kFeedOpType && - ops_desc.at(interval.at(1))->Type() == framework::kFetchOpType) { - ng_op_state_ = OpState::FULL; + std::vector ops_desc; + for (auto op_desc : p_bdesc->AllOps()) { + ops_desc.emplace_back(op_desc); } - for (auto* op_desc : ops_desc) { + for (auto op_desc : ops_desc) { if (op_desc->Type().find("_grad") != std::string::npos) { - ng_op_state_ = ng_op_state_ == OpState::FULL ? OpState::FULL_TRAIN - : OpState::PARTIAL_TRAIN; + this->is_test_ = false; break; } } - if (ng_op_state_ != OpState::FULL_TRAIN && - ng_op_state_ != OpState::PARTIAL_TRAIN) { - ng_op_state_ = ng_op_state_ == OpState::FULL ? 
OpState::FULL_TEST - : OpState::PARTIAL_TEST; + if (interval[0] > 0 && + ops_desc.at(interval[0] - 1)->Type() == framework::kFeedOpType && + interval[1] < static_cast(ops_desc.size()) && + ops_desc.at(interval.at(1))->Type() == framework::kFetchOpType) { + this->op_state_ = OpState::FULL; } -} -void NgraphEngine::GetNgInputShape( - std::shared_ptr op) { - framework::RuntimeContext ctx(op->Inputs(), op->Outputs(), scope_); - op->RuntimeInferShape(scope_, place_, ctx); - for (auto& var_name_item : op->Inputs()) { - for (auto& var_name : var_name_item.second) { - auto* var = scope_.FindVar(var_name); - if (var && var->IsType()) { - auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); - auto sp = Ddim2Shape(tensor_pd->dims()); - if (std::find(var_in_.begin(), var_in_.end(), var_name) != - var_in_.end()) { - if (var_node_map_->find(var_name) == var_node_map_->end()) { - // auto ng_type = pd2ng_type_map.at(GetDataTypeOfVar(var)); - auto ng_type = var_type_map_.at(var_name); - auto prm = - std::make_shared(ng_type, sp, true); - (*var_node_map_)[var_name] = prm; - (*var_in_node_map_)[var_name] = prm; - } - } - } - } + if (this->op_state_ == OpState::FULL) { + this->op_state_ = this->is_test_ ? OpState::FULL_TEST : OpState::FULL_TRAIN; + } else { + this->op_state_ = + this->is_test_ ? OpState::PARTIAL_TEST : OpState::PARTIAL_TRAIN; } -} -void NgraphEngine::BuildNgNodes() { - for (auto& op : fused_ops_) { - for (auto& var_name_item : op->Outputs()) { + int idx = interval[0]; + while (idx < interval[1]) { + this->fused_ops_.emplace_back( + framework::OpRegistry::CreateOp(*(ops_desc[idx]))); + ++idx; + } + while (ops_desc.at(idx)->Type() != framework::kFetchOpType) { + auto op_desc = ops_desc.at(idx); + for (auto& var_name_item : op_desc->Inputs()) { for (auto& var_name : var_name_item.second) { - if (var_node_map_->find(var_name) == var_node_map_->end()) { - auto* var = scope_.FindVar(var_name); - if (var && var->IsType()) { - auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); - auto& ddim = tensor_pd->dims(); - auto ng_shape = Ddim2Shape(ddim); - auto ng_type = var_type_map_.at(var_name); - auto prm = std::make_shared(ng_type, - ng_shape, true); - (*var_node_map_)[var_name] = prm; - } - } + this->post_op_inputs_.insert(var_name); } } + ++idx; } - NgraphBridge ngb(var_node_map_); - for (auto& op : fused_ops_) { - ngb.BuildNgNode(op); - } + + BuildNgIO(ops_desc, interval); } -void NgraphEngine::BuildNgIO() { +void NgraphEngine::BuildNgIO(const std::vector& ops_desc, + const std::vector& interval) { std::unordered_set inputs; std::unordered_set outputs; - - for (auto& op : fused_ops_) { + for (int i = interval[0]; i < interval[1]; ++i) { + auto op = ops_desc[i]; for (auto& var_name_item : op->Inputs()) { for (auto& var_name : var_name_item.second) { inputs.insert(var_name); @@ -302,15 +320,11 @@ void NgraphEngine::BuildNgIO() { std::find(var_in_.begin(), var_in_.end(), var_name) == var_in_.end()) { // fill var_in here to keep lhs and rhs order - var_in_.push_back(var_name); + this->var_in_.emplace_back(var_name); } } } - if (op->Type() != "fill_constant") { - GetNgInputShape(op); - } - for (auto& var_name_item : op->Outputs()) { PADDLE_ENFORCE_LE(var_name_item.second.size(), 1, "op %s has more than 1 output - Not handling yet", @@ -322,172 +336,278 @@ void NgraphEngine::BuildNgIO() { } // var_out.clear(); - for (auto& op : fused_ops_) { + for (int i = interval[0]; i < interval[1]; ++i) { + auto op = ops_desc[i]; for (auto& var_name_item : op->Outputs()) { 
PADDLE_ENFORCE_LE(var_name_item.second.size(), 1, "op %s has more than 1 output - Not handling yet", op->Type()); for (auto& var_name : var_name_item.second) { - switch (ng_op_state_) { + switch (this->op_state_) { case OpState::PARTIAL_TEST: if (post_op_inputs_.find(var_name) != post_op_inputs_.end() || - fetches_.find(var_name) != fetches_.end()) { - var_out_.push_back(var_name); + find(fetch_vars.begin(), fetch_vars.end(), var_name) != + fetch_vars.end()) { + this->var_out_.emplace_back(var_name); } break; case OpState::FULL_TEST: - if (fetches_.find(var_name) != fetches_.end()) { - var_out_.push_back(var_name); + if (find(fetch_vars.begin(), fetch_vars.end(), var_name) != + fetch_vars.end()) { + this->var_out_.emplace_back(var_name); } break; case OpState::PARTIAL_TRAIN: - if (fetches_.find(var_name) != fetches_.end() || + if (find(fetch_vars.begin(), fetch_vars.end(), var_name) != + fetch_vars.end() || post_op_inputs_.find(var_name) != post_op_inputs_.end() || persistables_.find(var_name) != persistables_.end()) { - var_out_.push_back(var_name); + this->var_out_.emplace_back(var_name); } break; case OpState::FULL_TRAIN: - if (fetches_.find(var_name) != fetches_.end() || + if (find(fetch_vars.begin(), fetch_vars.end(), var_name) != + fetch_vars.end() || persistables_.find(var_name) != persistables_.end()) { - var_out_.push_back(var_name); + this->var_out_.emplace_back(var_name); } break; default: - var_out_.push_back(var_name); + this->var_out_.emplace_back(var_name); } } } } + for (size_t i = 0; i < var_in_.size(); ++i) { + auto var_name = var_in_[i]; + if (persistables_.find(var_name) == persistables_.end()) { + var_in_updates_.emplace_back(i); + } + } } -void NgraphEngine::BuildNgFunction() { +void NgraphEngine::GetNgInputShape() { + for (auto& var_name : var_in_) { + auto* var = scope_.FindVar(var_name); + if (var && var->IsType()) { + auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); + auto sp = Ddim2Shape(tensor_pd->dims()); + auto ng_type = var_type_map_[var_name]; + auto prm = std::make_shared(ng_type, sp, true); + (*var_node_map_)[var_name] = prm; + (*var_in_node_map_)[var_name] = prm; + } + } +} + +void NgraphEngine::BuildNgNodes() { + for (auto& op : fused_ops_) { + for (auto& var_name_item : op->Outputs()) { + for (auto& var_name : var_name_item.second) { + if (var_node_map_->find(var_name) == var_node_map_->end()) { + auto* var = scope_.FindVar(var_name); + if (var && var->IsType()) { + auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); + auto& ddim = tensor_pd->dims(); + auto ng_shape = Ddim2Shape(ddim); + auto ng_type = var_type_map_[var_name]; + auto prm = std::make_shared(ng_type, + ng_shape, true); + (*var_node_map_)[var_name] = prm; + } + } + } + } + } + + NgraphBridge ngb(var_node_map_); + for (auto& op : fused_ops_) { + ngb.BuildNgNode(op); + } +} + +void NgraphEngine::RunInferShape() { + for (auto& op : fused_ops_) { + framework::RuntimeContext ctx(op->Inputs(), op->Outputs(), scope_); + op->RuntimeInferShape(scope_, place_, ctx); + } +} + +void NgraphEngine::BuildNgFunction(const std::vector& interval) { + Prepare(interval); + RunInferShape(); + GetNgInputShape(); BuildNgNodes(); ngraph_function_ = nullptr; ngraph::NodeVector func_outputs; ngraph::ParameterVector func_inputs; for (auto& vo : var_out_) { - func_outputs.push_back(var_node_map_->at(vo)); + func_outputs.emplace_back(var_node_map_->at(vo)); } for (auto& vi : var_in_) { std::shared_ptr prm = std::dynamic_pointer_cast( var_in_node_map_->at(vi)); - func_inputs.push_back(prm); + 
func_inputs.emplace_back(prm); } ngraph_function_ = std::make_shared(func_outputs, func_inputs); } -void NgraphEngine::GetNgFunction() { - bool cache_on = true; - if (cache_on) { - std::string input_shape_str; - for (auto& var_name : var_in_) { - auto shape = var_node_map_->at(var_name)->get_shape(); - for (size_t i = 0; i < shape.size(); ++i) { - input_shape_str += std::to_string(shape.at(i)); +void NgraphEngine::GetNgFunction(std::string engine_key, + const std::vector& interval) { + bool use_cache = true; + if (use_cache) { + this->func_cache_key_ = ""; + for (int i = 0; i < std::min(static_cast(feed_vars.size()), 10); ++i) { + auto* var = scope_.FindVar(feed_vars[i]); + if (var && var->IsType()) { + auto* tensor_pd = GetLoDTensorOrSelectedRowsValueFromVar(*var); + auto dims = tensor_pd->dims(); + for (int j = 0; j < dims.size(); ++j) { + func_cache_key_ += std::to_string(dims[j]); + } } } - func_cache_key_ = input_shape_str + func_cache_key_; - if (func_cache_.find(func_cache_key_) != func_cache_.end()) { - ngraph_function_ = func_cache_.at(func_cache_key_); - } else { - BuildNgFunction(); - func_cache_[func_cache_key_] = ngraph_function_; + func_cache_key_ += std::to_string(interval[0]) + "_" + + std::to_string(interval[1]) + engine_key; + func_cache_key_ = std::to_string(std::hash()(func_cache_key_)); + + if (engine_cache.find(func_cache_key_) != engine_cache.end()) { + if (engine_cache[func_cache_key_].persistables.size() == 0) { + engine_cache.clear(); + t_in_cache_.clear(); + } else { + auto var_name = engine_cache[func_cache_key_].persistables.begin(); + framework::Variable* var = scope_.FindVar(*var_name); + if (var != pre_var_ptr) { + engine_cache.clear(); + t_in_cache_.clear(); + } + pre_var_ptr = var; + } + } + + if (engine_cache.find(func_cache_key_) == engine_cache.end()) { + BuildNgFunction(interval); + engine_cache[func_cache_key_].ngraph_function = this->ngraph_function_; + engine_cache[func_cache_key_].persistables = this->persistables_; + engine_cache[func_cache_key_].var_in_updates = this->var_in_updates_; + engine_cache[func_cache_key_].var_in = this->var_in_; + engine_cache[func_cache_key_].var_out = this->var_out_; + engine_cache[func_cache_key_].is_test = this->is_test_; } } else { - BuildNgFunction(); + BuildNgFunction(interval); } } void NgraphEngine::Run(const framework::Scope& scope, const platform::Place& place) const { - std::vector> t_in; - std::vector> t_out; + std::shared_ptr ng_func; + const std::set* p_persistables; + const std::vector* p_var_in_updates; + const std::vector* p_var_in; + const std::vector* p_var_out; + bool is_test; + + bool use_cache = true; + if (use_cache) { + PADDLE_ENFORCE(engine_cache.find(func_cache_key_) != engine_cache.end(), + "Cannot find cached data to run ngraph function"); + ng_func = engine_cache[func_cache_key_].ngraph_function; + p_persistables = &(engine_cache[func_cache_key_].persistables); + p_var_in_updates = &(engine_cache[func_cache_key_].var_in_updates); + p_var_in = &(engine_cache[func_cache_key_].var_in); + p_var_out = &(engine_cache[func_cache_key_].var_out); + is_test = engine_cache[func_cache_key_].is_test; + } else { + ng_func = ngraph_function_; + p_persistables = &this->persistables_; + p_var_in_updates = &this->var_in_updates_; + p_var_in = &this->var_in_; + p_var_out = &this->var_out_; + is_test = this->is_test_; + } - for (size_t i = 0; i < var_in_.size(); ++i) { - auto vi = var_in_.at(i); - auto sp = var_node_map_->at(vi)->get_shape(); - std::shared_ptr ti; - auto* var = scope.FindVar(vi); - if (var && 
var->IsType()) { - auto* tensor_pd = GetMutableLoDTensorOrSelectedRowsValueFromVar(var); - PADDLE_ENFORCE(sp == Ddim2Shape(tensor_pd->dims()), - "Ensure ngraph tensor layout align with paddle tensor"); - auto ng_type = var_type_map_.at(vi); - if (ng_type == ngraph::element::f32) { - auto pd_arr = tensor_pd->mutable_data(place); - ti = backend_->create_tensor(ngraph::element::f32, sp, pd_arr); - } else if (ng_type == ngraph::element::i32) { - const int* arr = tensor_pd->data(); - ti = backend_->create_tensor(ngraph::element::i32, sp, - const_cast(arr)); - } else if (ng_type == ngraph::element::i64) { - auto pd_arr = tensor_pd->mutable_data(place); - ti = backend_->create_tensor(ngraph::element::i64, sp, pd_arr); - } else if (ng_type == ngraph::element::f64) { - auto pd_arr = tensor_pd->mutable_data(place); - ti = backend_->create_tensor(ngraph::element::f64, sp, pd_arr); - } else if (ng_type == ngraph::element::boolean) { - auto pd_arr = tensor_pd->mutable_data(place); - ti = backend_->create_tensor(ngraph::element::boolean, sp, pd_arr); + std::vector>* p_t_in; + std::vector> t_in = {}; + + auto m_parameters = ng_func->get_parameters(); + auto m_results = ng_func->get_results(); + if (is_test && use_cache && + t_in_cache_.find(func_cache_key_) != t_in_cache_.end()) { + p_t_in = &(t_in_cache_[func_cache_key_]); + for (size_t i = 0; i < p_var_in_updates->size(); ++i) { + int index = p_var_in_updates->at(i); + auto vi = p_var_in->at(index); + auto sp = m_parameters[index]->get_shape(); + auto ng_type = m_parameters[index]->get_element_type(); + std::shared_ptr ti; + auto* var = scope.FindVar(vi); + if (var && var->IsType()) { + auto* tensor_pd = GetMutableLoDTensorOrSelectedRowsValueFromVar(var); + void* pd_arr = tensor_pd->mutable_data(place, ng2pd_type_map[ng_type]); + ti = backend_->create_tensor(ng_type, sp, pd_arr); + (*p_t_in)[index] = ti; } else { - PADDLE_THROW("Data type not handling for var %s", vi); + PADDLE_THROW("Cannot find var or tensor with var name %s", vi); } + } + } else { + if (is_test && use_cache) { + p_t_in = &(t_in_cache_[func_cache_key_]); } else { - PADDLE_THROW("Cannot find var or tensor with var name %s", vi); + p_t_in = &t_in; } - bool is_test = (ng_op_state_ == OpState::PARTIAL_TEST || - ng_op_state_ == OpState::FULL_TEST) - ? true - : false; - bool is_persistable = - (persistables_.find(vi) != persistables_.end()) ? true : false; - if (is_test && is_persistable) { - ti->set_stale(false); + + for (size_t i = 0; i < p_var_in->size(); ++i) { + auto vi = p_var_in->at(i); + auto sp = m_parameters[i]->get_shape(); + auto ng_type = m_parameters[i]->get_element_type(); + std::shared_ptr ti; + auto* var = scope.FindVar(vi); + if (var && var->IsType()) { + auto* tensor_pd = GetMutableLoDTensorOrSelectedRowsValueFromVar(var); + void* pd_arr = tensor_pd->mutable_data(place, ng2pd_type_map[ng_type]); + PADDLE_ENFORCE(sp == Ddim2Shape(tensor_pd->dims()), + "Ensure ngraph tensor layout align with paddle tensor"); + ti = backend_->create_tensor(ng_type, sp, pd_arr); + } else { + PADDLE_THROW("Cannot find var or tensor with var name %s", vi); + } + bool is_persistable = + (p_persistables->find(vi) != p_persistables->end()) ? 
true : false; + if (is_test && is_persistable) { + ti->set_stale(false); + } + (*p_t_in).emplace_back(ti); } - t_in.push_back(ti); } - for (size_t i = 0; i < var_out_.size(); ++i) { - auto vo = var_out_[i]; + std::vector> t_out = {}; + for (size_t i = 0; i < p_var_out->size(); ++i) { + auto vo = p_var_out->at(i); auto* var = scope.FindVar(vo); - std::shared_ptr to; if (var && var->IsType()) { + auto sp = m_results[i]->get_shape(); + var->GetMutable()->Resize(Shape2Ddim(sp)); auto* tensor_pd = GetMutableLoDTensorOrSelectedRowsValueFromVar(var); - auto dd = tensor_pd->dims(); - ngraph::Shape sp = Ddim2Shape(dd); - auto ng_type = var_type_map_.at(vo); - if (ng_type == ngraph::element::f32) { - auto pd_arr = tensor_pd->mutable_data(place); - to = backend_->create_tensor(ng_type, sp, pd_arr); - } else if (ng_type == ngraph::element::i64) { - auto pd_arr = tensor_pd->mutable_data(place); - to = backend_->create_tensor(ng_type, sp, pd_arr); - } else if (ng_type == ngraph::element::i32) { - auto pd_arr = tensor_pd->mutable_data(place); - to = backend_->create_tensor(ng_type, sp, pd_arr); - } else if (ng_type == ngraph::element::f64) { - auto pd_arr = tensor_pd->mutable_data(place); - to = backend_->create_tensor(ng_type, sp, pd_arr); - } else if (ng_type == ngraph::element::boolean) { - auto pd_arr = tensor_pd->mutable_data(place); - to = backend_->create_tensor(ng_type, sp, pd_arr); - } else { - PADDLE_THROW("Data type not handled in for var %s", vo); - } - t_out.push_back(to); + auto ng_type = m_results[i]->get_element_type(); + void* pd_arr = tensor_pd->mutable_data(place, ng2pd_type_map[ng_type]); + std::shared_ptr to = + backend_->create_tensor(ng_type, sp, pd_arr); + t_out.emplace_back(to); } else { PADDLE_THROW("Cannot find var or tensor with var name %s", vo); } } - auto handle = backend_->compile(ngraph_function_); - handle->call_with_validate(t_out, t_in); + auto handle = backend_->compile(ng_func); + handle->call_with_validate(t_out, *p_t_in); } // NgraphEngine::Run } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/ngraph/ngraph_engine.h b/paddle/fluid/operators/ngraph/ngraph_engine.h index bf5ff2a743b0edb69163e674d36c56a02c0b4153..fef51464b5702e61d052f28050f6aefaecf0f615 100644 --- a/paddle/fluid/operators/ngraph/ngraph_engine.h +++ b/paddle/fluid/operators/ngraph/ngraph_engine.h @@ -12,12 +12,18 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#ifndef PADDLE_FLUID_OPERATORS_NGRAPH_NGRAPH_ENGINE_H_ +#define PADDLE_FLUID_OPERATORS_NGRAPH_NGRAPH_ENGINE_H_ +#include +#include #include #include +#include #include #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/var_desc.h" #include "ngraph/ngraph.hpp" @@ -33,29 +39,47 @@ enum class OpState { /* nGraph support state on ops */ UNKNOWN /* Output all for debug purpose */ }; +// cache engine repetitives +struct EngineCache { + std::shared_ptr ngraph_function; + std::set persistables; + std::vector var_in; + std::vector var_out; + std::vector var_in_updates; + bool is_test = true; +}; + // perform graph build through bridge and execute computation class NgraphEngine { public: explicit NgraphEngine(const framework::Scope& scope, const platform::Place& place, - const std::string& serialized_graph, - const std::vector& interval); + const framework::ExecutionContext& ctx); void Run(const framework::Scope& scope, const platform::Place& place) const; - static void EnableNgraph(const framework::ProgramDesc& program); + static const framework::BlockDesc* p_bdesc; + static std::vector feed_vars, fetch_vars; + + static void FuseNgraphOps( + const framework::BlockDesc& prog, + std::vector>* ops); private: - static std::unordered_map> - func_cache_; + static std::unordered_map engine_cache; + static std::unordered_map< + std::string, std::vector>> + t_in_cache_; + static framework::Variable* pre_var_ptr; + const framework::Scope& scope_; const platform::Place& place_; std::vector> fused_ops_; std::unordered_map var_type_map_; - std::unordered_set persistables_; - std::unordered_set fetches_; + std::set persistables_; std::unordered_set post_op_inputs_; - OpState ng_op_state_ = OpState::UNKNOWN; + OpState op_state_ = OpState::UNKNOWN; + bool is_test_{true}; std::string func_cache_key_; // ngraph backend eg. 
CPU @@ -66,6 +90,8 @@ class NgraphEngine { std::vector<std::string> var_in_; // var_name of outputs from fetch in order std::vector<std::string> var_out_; + // non-persistable var_in + std::vector<size_t> var_in_updates_; // map input vars to nodes std::shared_ptr< std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>> @@ -74,20 +100,23 @@ class NgraphEngine { std::shared_ptr< std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>> var_node_map_; - // prepare info for nraph engine - void Prepare(const framework::BlockDesc& block, - const std::vector<int>& interval); + // prepare info the ngraph engine needs + void Prepare(const std::vector<int>& interval); + // get ngraph engine input and output list + void BuildNgIO(const std::vector<framework::OpDesc*>& op_descs, + const std::vector<int>& interval); // get ngraph input and define ngraph input parameters - void GetNgInputShape(std::shared_ptr<framework::OperatorBase> op); + void GetNgInputShape(); // Call ngraph bridge to map ops void BuildNgNodes(); - // get the ngraph input and output var list - void BuildNgIO(); + // run paddle RuntimeInferShape to get the tensor shape + void RunInferShape(); // build ngraph function call - void BuildNgFunction(); + void BuildNgFunction(const std::vector<int>& interval); // Check cache for ngraph function or otherwise build the function - void GetNgFunction(); + void GetNgFunction(std::string engine_key, const std::vector<int>& interval); }; } // namespace operators } // namespace paddle +#endif // PADDLE_FLUID_OPERATORS_NGRAPH_NGRAPH_ENGINE_H_ diff --git a/paddle/fluid/operators/ngraph/ngraph_engine_op.cc b/paddle/fluid/operators/ngraph/ngraph_engine_op.cc index 3051ca123b29658d3e9a35239ad00f621a297cb5..f941f917c82b3b74a35739c08112233fd0a3477c 100644 --- a/paddle/fluid/operators/ngraph/ngraph_engine_op.cc +++ b/paddle/fluid/operators/ngraph/ngraph_engine_op.cc @@ -29,6 +29,7 @@ class NgraphEngineOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Xs", "A list of inputs.").AsDispensable(); AddOutput("Ys", "A list of outputs").AsDispensable(); AddAttr<std::string>("graph", "the graph."); + AddAttr<std::string>("engine_key", "the engine hash key."); AddAttr<std::vector<int>>("interval", "op interval supported by ngraph"); AddComment("ngraph engine operator."); } diff --git a/paddle/fluid/operators/ngraph/ngraph_engine_op.h b/paddle/fluid/operators/ngraph/ngraph_engine_op.h index 2f194a9b8766316fc645f7e22e21fff048fb7d63..c9b2a3970e17c1a06fa0cc67aa15df304a30656e 100644 --- a/paddle/fluid/operators/ngraph/ngraph_engine_op.h +++ b/paddle/fluid/operators/ngraph/ngraph_engine_op.h @@ -46,10 +46,8 @@ class NgraphEngineKernel : public framework::OpKernel<T> { void Compute(const framework::ExecutionContext& ctx) const override { auto& scope = ctx.scope(); auto place = ctx.GetPlace(); - std::string serialized_graph = ctx.Attr<std::string>("graph"); - auto interval = ctx.Attr<std::vector<int>>("interval"); - NgraphEngine ngraph_engine(scope, place, serialized_graph, interval); + NgraphEngine ngraph_engine(scope, place, ctx); ngraph_engine.Run(scope, place); } }; diff --git a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc index d3dcd1f96a986d2450c8af780a12183f7dfc66d5..f357c9c08d042b69259f229955922f2f11b52c63 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc @@ -22,9 +22,6 @@ class SequenceEnumerateOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - if (ctx->IsRuntime()) { - return; - } PADDLE_ENFORCE( ctx->HasInput("X"), "Input(X) of SequecceEnumerate operator should
not be null."); @@ -62,6 +59,8 @@ class SequenceEnumerateOpMaker : public framework::OpProtoAndCheckerMaker { }); AddAttr("pad_value", "(int) The enumerate sequence padding value.") .SetDefault(0); + AddAttr(framework::kAllKernelsMustComputeRuntimeShape, "") + .SetDefault(true); AddComment(R"DOC( Sequence Enumerate Operator. diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc index b084f1a649bd49af3d85bc933f50337b33371ca3..8458b17f82a976bad37df58dddc2c0d80c8eb13e 100644 --- a/paddle/fluid/platform/device_tracer.cc +++ b/paddle/fluid/platform/device_tracer.cc @@ -11,7 +11,6 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/platform/device_tracer.h" #include #include @@ -30,6 +29,8 @@ limitations under the License. */ #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/platform/device_tracer.h" +#include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/string/printf.h" namespace paddle { @@ -317,6 +318,24 @@ class DeviceTracerImpl : public DeviceTracer { stream_id, correlation_id, bytes}); } + void AddMemInfoRecord(uint64_t start_ns, uint64_t end_ns, size_t bytes, + const Place &place, const std::string &alloc_in, + const std::string &free_in, int64_t thread_id) { + if (0 == start_ns || 0 == end_ns) { + VLOG(3) << alloc_in << ", " << free_in << " Cannot be traced."; + return; + } + thread_local std::forward_list *local_mem_info_record = + nullptr; + if (local_mem_info_record == nullptr) { + std::lock_guard l(trace_mu_); + mem_info_record_.emplace_front(); + local_mem_info_record = &mem_info_record_.front(); + } + local_mem_info_record->emplace_front(MemInfoRecord{ + start_ns, end_ns, bytes, place, thread_id, alloc_in, free_in}); + } + void AddActiveKindRecords(const std::string &anno, uint64_t start_ns, uint64_t end_ns, int64_t device_id, int64_t thread_id, uint32_t correlation_id) { @@ -409,6 +428,7 @@ class DeviceTracerImpl : public DeviceTracer { correlations_.clear(); for (auto &tmp : correlations_pairs) tmp.clear(); for (auto &tmp : cpu_records_) tmp.clear(); + for (auto &tmp : mem_info_record_) tmp.clear(); for (auto &tmp : active_kind_records_) tmp.clear(); } @@ -440,9 +460,12 @@ class DeviceTracerImpl : public DeviceTracer { proto::Profile profile_pb; profile_pb.set_start_ns(start_ns_); profile_pb.set_end_ns(end_ns_); - if (correlations_.empty()) - for (auto &tmp : correlations_pairs) + if (correlations_.empty()) { + for (auto &tmp : correlations_pairs) { for (auto &pair : tmp) correlations_[pair.first] = pair.second; + } + } + for (const KernelRecord &r : kernel_records_) { auto *event = profile_pb.add_events(); event->set_type(proto::Event::GPUKernel); @@ -462,6 +485,7 @@ class DeviceTracerImpl : public DeviceTracer { event->set_device_id(r.device_id); } VLOG(1) << "KernelRecord event miss: " << miss << " find: " << find; + for (auto &tmp : cpu_records_) { for (const CPURecord &r : tmp) { auto *event = profile_pb.add_events(); @@ -473,6 +497,7 @@ class DeviceTracerImpl : public DeviceTracer { event->set_device_id(r.device_id); } } + for (auto &tmp : active_kind_records_) { for (const ActiveKindRecord &r : tmp) { auto *event = profile_pb.add_events(); @@ -510,6 +535,31 @@ class DeviceTracerImpl : public DeviceTracer { 
+
   void AddActiveKindRecords(const std::string &anno, uint64_t start_ns,
                             uint64_t end_ns, int64_t device_id,
                             int64_t thread_id, uint32_t correlation_id) {
@@ -409,6 +428,7 @@ class DeviceTracerImpl : public DeviceTracer {
     correlations_.clear();
     for (auto &tmp : correlations_pairs) tmp.clear();
     for (auto &tmp : cpu_records_) tmp.clear();
+    for (auto &tmp : mem_info_record_) tmp.clear();
     for (auto &tmp : active_kind_records_) tmp.clear();
   }

@@ -440,9 +460,12 @@ class DeviceTracerImpl : public DeviceTracer {
     proto::Profile profile_pb;
     profile_pb.set_start_ns(start_ns_);
     profile_pb.set_end_ns(end_ns_);
-    if (correlations_.empty())
-      for (auto &tmp : correlations_pairs)
+    if (correlations_.empty()) {
+      for (auto &tmp : correlations_pairs) {
         for (auto &pair : tmp) correlations_[pair.first] = pair.second;
+      }
+    }
+
     for (const KernelRecord &r : kernel_records_) {
       auto *event = profile_pb.add_events();
       event->set_type(proto::Event::GPUKernel);
@@ -462,6 +485,7 @@ class DeviceTracerImpl : public DeviceTracer {
       event->set_device_id(r.device_id);
     }
     VLOG(1) << "KernelRecord event miss: " << miss << " find: " << find;
+
     for (auto &tmp : cpu_records_) {
       for (const CPURecord &r : tmp) {
         auto *event = profile_pb.add_events();
@@ -473,6 +497,7 @@ class DeviceTracerImpl : public DeviceTracer {
         event->set_device_id(r.device_id);
       }
     }
+
     for (auto &tmp : active_kind_records_) {
       for (const ActiveKindRecord &r : tmp) {
         auto *event = profile_pb.add_events();
@@ -510,6 +535,31 @@ class DeviceTracerImpl : public DeviceTracer {
       event->mutable_memcopy()->set_bytes(r.bytes);
     }
     VLOG(1) << "MemRecord event miss: " << miss << " find: " << find;
+
+    for (auto &tmp : mem_info_record_) {
+      for (const auto &r : tmp) {
+        auto *event = profile_pb.add_mem_events();
+        event->set_device_id(0);
+        if (platform::is_cpu_place(r.place)) {
+          event->set_place(proto::MemEvent::CPUPlace);
+        } else if (platform::is_gpu_place(r.place)) {
+          event->set_place(proto::MemEvent::CUDAPlace);
+          event->set_device_id(
+              boost::get<platform::CUDAPlace>(r.place).GetDeviceId());
+        } else if (platform::is_cuda_pinned_place(r.place)) {
+          event->set_place(proto::MemEvent::CUDAPinnedPlace);
+        } else {
+          PADDLE_THROW("The current place is not supported.");
+        }
+        event->set_alloc_in(r.alloc_in);
+        event->set_free_in(r.free_in);
+        event->set_start_ns(r.start_ns);
+        event->set_end_ns(r.end_ns);
+        event->set_bytes(r.bytes);
+        event->set_thread_id(r.thread_id);
+      }
+    }
+
     std::ofstream profile_f;
     profile_f.open(profile_path,
                    std::ios::out | std::ios::trunc | std::ios::binary);
@@ -553,6 +603,7 @@ class DeviceTracerImpl : public DeviceTracer {
   std::forward_list<KernelRecord> kernel_records_;
   std::forward_list<MemRecord> mem_records_;
   std::forward_list<std::forward_list<CPURecord>> cpu_records_;
+  std::forward_list<std::forward_list<MemInfoRecord>> mem_info_record_;
   std::forward_list<std::forward_list<ActiveKindRecord>> active_kind_records_;
   std::forward_list<std::forward_list<std::pair<uint32_t, Event *>>>
       correlations_pairs;
@@ -575,7 +626,7 @@ Event *CurAnnotation() { return annotation_stack.back(); }

 std::string CurAnnotationName() {
-  if (annotation_stack.empty()) return "";
+  if (annotation_stack.empty()) return "Unknown";
   return annotation_stack.back()->name();
 }
diff --git a/paddle/fluid/platform/device_tracer.h b/paddle/fluid/platform/device_tracer.h
index a8f1d89383d7b82bed05633d1bfaae059577daa4..85168a046fb3fa4317956737871cde56e15bedfb 100644
--- a/paddle/fluid/platform/device_tracer.h
+++ b/paddle/fluid/platform/device_tracer.h
@@ -18,6 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/dynload/cupti.h"
 #include "paddle/fluid/platform/event.h"
+#include "paddle/fluid/platform/place.h"
 #include "paddle/fluid/platform/port.h"
 #include "paddle/fluid/platform/profiler.pb.h"

@@ -47,6 +48,7 @@ class DeviceTracer {
     int64_t stream_id;
     uint32_t correlation_id;
   };
+
   struct CPURecord {
     std::string name;
     uint64_t start_ns;
@@ -54,6 +56,7 @@ class DeviceTracer {
     int64_t device_id;
     int64_t thread_id;
   };
+
   struct MemRecord {
     std::string name;
     uint64_t start_ns;
@@ -63,6 +66,17 @@ class DeviceTracer {
     uint32_t correlation_id;
     uint64_t bytes;
   };
+
+  struct MemInfoRecord {
+    uint64_t start_ns;
+    uint64_t end_ns;
+    size_t bytes;
+    Place place;
+    int64_t thread_id;
+    std::string alloc_in;
+    std::string free_in;
+  };
+
   struct ActiveKindRecord {
     std::string name;
     uint64_t start_ns;
@@ -71,6 +85,7 @@ class DeviceTracer {
     int64_t thread_id;
     uint32_t correlation_id;
   };
+
   virtual ~DeviceTracer() {}
   // Needs to be called once before use.
   virtual void Enable() = 0;
@@ -97,6 +112,12 @@ class DeviceTracer {
                              int64_t thread_id,
                              uint32_t correlation_id) = 0;

+  virtual void AddMemInfoRecord(uint64_t start_ns, uint64_t end_ns,
+                                size_t bytes, const Place& place,
+                                const std::string& alloc_in,
+                                const std::string& free_in,
+                                int64_t thread_id) = 0;
+
   // Add a cuda kernel stats. `correlation_id` will be mapped to annotation
   // added before for human readability.
   virtual void AddKernelRecords(std::string name, uint64_t start, uint64_t end,
diff --git a/paddle/fluid/platform/event.h b/paddle/fluid/platform/event.h
index 2dcf966754cbed2670acb9c3548c23355be5503c..e9bdb82a50fa4166cecdaea1de01d2f458f3da9a 100644
--- a/paddle/fluid/platform/event.h
+++ b/paddle/fluid/platform/event.h
@@ -13,10 +13,12 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #pragma once
+
 #include
 #ifdef PADDLE_WITH_CUDA
 #include
 #endif
+#include "paddle/fluid/platform/place.h"

 namespace paddle {
 namespace platform {
@@ -64,5 +66,36 @@ class Event {
 #endif
 #endif
 };
+
+class MemEvent {
+ public:
+  MemEvent(EventType type, uint64_t start_ns, uint64_t end_ns, size_t bytes,
+           Place place, int64_t thread_id, const std::string& annotation)
+      : type_(type),
+        start_ns_(start_ns),
+        end_ns_(end_ns),
+        bytes_(bytes),
+        place_(place),
+        thread_id_(thread_id),
+        annotation_(annotation) {}
+
+  const EventType& type() const { return type_; }
+  uint64_t start_ns() const { return start_ns_; }
+  uint64_t end_ns() const { return end_ns_; }
+  size_t bytes() const { return bytes_; }
+  Place place() const { return place_; }
+  int64_t thread_id() const { return thread_id_; }
+  const std::string& annotation() const { return annotation_; }
+
+ private:
+  EventType type_;
+  uint64_t start_ns_ = 0;
+  uint64_t end_ns_ = 0;
+  size_t bytes_;
+  Place place_;
+  int64_t thread_id_;
+  std::string annotation_;
+};
+
 }  // namespace platform
 }  // namespace paddle
diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc
index 9a285a6b533dcb48013e3b3e4d34dc27186173ac..6d055a442106d88af39c771f3ddf156ba616c99f 100644
--- a/paddle/fluid/platform/profiler.cc
+++ b/paddle/fluid/platform/profiler.cc
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/platform/profiler.h"
-
 #include
 #include
 #include
@@ -21,6 +20,8 @@ limitations under the License. */
 #include  // NOLINT
 #include
 #include
+#include
+
 #ifdef PADDLE_WITH_CUDA
 #include
 #endif  // PADDLE_WITH_CUDA
@@ -36,8 +37,6 @@ DEFINE_bool(enable_rpc_profiler, false, "Enable rpc profiler or not.");
 namespace paddle {
 namespace platform {

-struct EventList;
-
 static int64_t profiler_lister_id = 0;
 static bool should_send_profile_state = false;
 std::mutex profiler_mu;
@@ -53,43 +52,15 @@ static uint32_t g_next_thread_id = 0;
 // The global mutex
 static std::mutex g_all_event_lists_mutex;
 // The total event lists of all threads
-static std::list<std::shared_ptr<EventList>> g_all_event_lists;
+static std::list<std::shared_ptr<EventList<Event>>> g_all_event_lists;
 // The thread local event list only can be accessed by the specific thread
-static thread_local std::shared_ptr<EventList> g_event_list;
-
-struct EventList {
-  constexpr static size_t kMB = 1024 * 1024;
-  constexpr static size_t kEventBlockSize = 16 * kMB;
-  constexpr static size_t kEventSize = sizeof(Event);
-  constexpr static size_t kEventAlign = alignof(Event);
-  constexpr static size_t kNumBlock =
-      kEventBlockSize /
-      ((kEventSize + kEventAlign - 1) / kEventAlign * kEventAlign);
-
-  template <typename... Args>
-  Event* Record(Args&&... args) {
-    if (event_blocks.empty() || event_blocks.front().size() == kNumBlock) {
-      event_blocks.emplace_front();
-      event_blocks.front().reserve(kNumBlock);
-    }
-    event_blocks.front().emplace_back(std::forward<Args>(args)...);
-    return &event_blocks.front().back();
-  }
-
-  std::vector<Event> Reduce() {
-    std::vector<Event> result;
-    for (auto& block : event_blocks) {
-      result.insert(result.begin(), std::make_move_iterator(block.begin()),
-                    std::make_move_iterator(block.end()));
-    }
-    event_blocks.clear();
-    return result;
-  }
-
-  void Clear() { event_blocks.clear(); }
-
-  std::forward_list<std::vector<Event>> event_blocks;
-};
+static thread_local std::shared_ptr<EventList<Event>> g_event_list;
+
+static std::list<std::shared_ptr<EventList<MemEvent>>> g_all_mem_event_lists;
+static thread_local std::shared_ptr<EventList<MemEvent>> g_mem_event_list;
+static std::mutex g_all_mem_event_lists_mutex;
+static thread_local int32_t g_mem_thread_id;
+static uint32_t g_mem_next_thread_id = 0;

 inline uint64_t GetTimeInNsec() {
   using clock = std::conditional<std::chrono::high_resolution_clock::is_steady,
                                  std::chrono::high_resolution_clock,
                                  std::chrono::steady_clock>::type;

+inline EventList<MemEvent> &GetMemEventList() {
+  if (!g_mem_event_list) {
+    g_mem_event_list = std::make_shared<EventList<MemEvent>>();
+    std::lock_guard<std::mutex> guard(g_all_mem_event_lists_mutex);
+    g_mem_thread_id = g_mem_next_thread_id++;
+    g_all_mem_event_lists.emplace_front(g_mem_event_list);
+  }
+  return *g_mem_event_list;
+}
+
+void PushMemEvent(uint64_t start_ns, uint64_t end_ns, size_t bytes,
+                  const Place &place, const std::string &annotation) {
+  GetMemEventList().Record(EventType::kPushRange, start_ns, end_ns, bytes,
+                           place, g_mem_thread_id, annotation);
+}
+
+void PopMemEvent(uint64_t start_ns, uint64_t end_ns, size_t bytes,
+                 const Place &place, const std::string &annotation) {
+  GetMemEventList().Record(EventType::kPopRange, start_ns, end_ns, bytes, place,
+                           g_mem_thread_id, annotation);
+}
+
+inline EventList<Event> &GetEventList() {
   if (!g_event_list) {
     std::lock_guard<std::mutex> guard(g_all_event_lists_mutex);
-    g_event_list = std::make_shared<EventList>();
+    g_event_list = std::make_shared<EventList<Event>>();
     g_thread_id = g_next_thread_id++;
     g_all_event_lists.emplace_front(g_event_list);
     RecoreCurThreadId(g_thread_id);
@@ -131,26 +124,26 @@ inline EventList& GetEventList() {
   return *g_event_list;
 }

-void Mark(const std::string& name) {
+void Mark(const std::string &name) {
   GetEventList().Record(EventType::kMark, name, g_thread_id);
 }

-Event* PushEvent(const std::string& name) {
+Event *PushEvent(const std::string &name) {
   return GetEventList().Record(EventType::kPushRange, name, g_thread_id);
 }

-void PopEvent(const std::string& name) {
+void PopEvent(const std::string &name) {
   GetEventList().Record(EventType::kPopRange, name, g_thread_id);
 }

-RecordEvent::RecordEvent(const std::string& name)
+RecordEvent::RecordEvent(const std::string &name)
     : is_enabled_(false), start_ns_(PosixInNsec()) {
   if (g_state == ProfilerState::kDisabled) return;
   // lock is not needed, the code below is thread-safe
   is_enabled_ = true;
   name_ = name;
-  Event* e = PushEvent(name_);
+  Event *e = PushEvent(name_);
   // Maybe need the same push/pop behavior.
   SetCurAnnotation(e);
 }

@@ -158,7 +151,7 @@ RecordEvent::RecordEvent(const std::string& name)
 RecordEvent::~RecordEvent() {
   if (g_state == ProfilerState::kDisabled || !is_enabled_) return;
   // lock is not needed, the code below is thread-safe
-  DeviceTracer* tracer = GetDeviceTracer();
+  DeviceTracer *tracer = GetDeviceTracer();
   if (tracer) {
     tracer->AddCPURecords(CurAnnotationName(), start_ns_, PosixInNsec(),
                           BlockDepth(), g_thread_id);
@@ -167,7 +160,56 @@ RecordEvent::~RecordEvent() {
   PopEvent(name_);
 }

-RecordRPCEvent::RecordRPCEvent(const std::string& name) {
+MemEvenRecorder MemEvenRecorder::recorder;
+
+void MemEvenRecorder::PushMemRecord(const void *ptr, const Place &place,
+                                    size_t size) {
+  if (g_state == ProfilerState::kDisabled) return;
+  std::lock_guard<std::mutex> guard(mtx_);
+  auto &events = address_memevent_[place];
+  PADDLE_ENFORCE(events.count(ptr) == 0, "");
+  events.emplace(ptr, std::unique_ptr<RecordMemEvent>(
+                          new MemEvenRecorder::RecordMemEvent(place, size)));
+}
+
+void MemEvenRecorder::PopMemRecord(const void *ptr, const Place &place) {
+  if (g_state == ProfilerState::kDisabled) return;
+  std::lock_guard<std::mutex> guard(mtx_);
+  auto &events = address_memevent_[place];
+  auto iter = events.find(ptr);
+  // The ptr may not be present in address_memevent_
+  if (iter != events.end()) {
+    events.erase(iter);
+  }
+}
+
+void MemEvenRecorder::Flush() {
+  std::lock_guard<std::mutex> guard(mtx_);
+  address_memevent_.clear();
+}
+
+MemEvenRecorder::RecordMemEvent::RecordMemEvent(const Place &place,
+                                                size_t bytes)
+    : place_(place),
+      bytes_(bytes),
+      start_ns_(PosixInNsec()),
+      alloc_in_(CurAnnotationName()) {
+  PushMemEvent(start_ns_, end_ns_, bytes_, place_, alloc_in_);
+}
+
+MemEvenRecorder::RecordMemEvent::~RecordMemEvent() {
+  DeviceTracer *tracer = GetDeviceTracer();
+  end_ns_ = PosixInNsec();
+
+  auto annotation_free = CurAnnotationName();
+  if (tracer) {
+    tracer->AddMemInfoRecord(start_ns_, end_ns_, bytes_, place_, alloc_in_,
+                             annotation_free, g_mem_thread_id);
+  }
+  PopMemEvent(start_ns_, end_ns_, bytes_, place_, annotation_free);
+}
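
RecordMemEvent bounds an allocation's lifetime with RAII: the constructor stamps the start time and the current annotation, and the destructor stamps the end time and reports the complete record. The same idea in a self-contained sketch (names are hypothetical):

    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <string>

    static uint64_t NowNs() {
      return std::chrono::duration_cast<std::chrono::nanoseconds>(
                 std::chrono::steady_clock::now().time_since_epoch())
          .count();
    }

    // Lives exactly as long as the allocation it describes.
    class AllocationSpan {
     public:
      AllocationSpan(size_t bytes, const std::string& tag)
          : bytes_(bytes), tag_(tag), start_ns_(NowNs()) {}
      ~AllocationSpan() {
        // Emitted when the allocation is freed: one complete record.
        std::cout << tag_ << ": " << bytes_ << " bytes held for "
                  << (NowNs() - start_ns_) << " ns\n";
      }

     private:
      size_t bytes_;
      std::string tag_;
      uint64_t start_ns_;
    };

    int main() {
      AllocationSpan span(1024, "conv2d/workspace");  // the "alloc" side
      // ... the memory would be used here ...
    }  // the "free" side: the destructor reports the whole span
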
+
+RecordRPCEvent::RecordRPCEvent(const std::string &name) {
   if (FLAGS_enable_rpc_profiler) {
     event_.reset(new platform::RecordEvent(name));
   }
@@ -185,7 +227,7 @@ RecordBlock::RecordBlock(int block_id)
 RecordBlock::~RecordBlock() {
   // lock is not needed, the code below is thread-safe
   if (g_state == ProfilerState::kDisabled || !is_enabled_) return;
-  DeviceTracer* tracer = GetDeviceTracer();
+  DeviceTracer *tracer = GetDeviceTracer();
   if (tracer) {
     // We try to put all blocks at the same nested depth in the
     // same timeline lane. and distinguish the using thread_id.
@@ -232,11 +274,16 @@ void EnableProfiler(ProfilerState state) {
 void ResetProfiler() {
   SynchronizeAllDevice();
   GetDeviceTracer()->Reset();
+  MemEvenRecorder::Instance().Flush();
   std::lock_guard<std::mutex> guard(g_all_event_lists_mutex);
   for (auto it = g_all_event_lists.begin(); it != g_all_event_lists.end();
        ++it) {
     (*it)->Clear();
   }
+  for (auto it = g_all_mem_event_lists.begin();
+       it != g_all_mem_event_lists.end(); ++it) {
+    (*it)->Clear();
+  }
 }

 std::vector<std::vector<Event>> GetAllEvents() {
@@ -249,6 +296,15 @@ std::vector<std::vector<Event>> GetAllEvents() {
   return result;
 }

+std::vector<std::vector<MemEvent>> GetMemEvents() {
+  std::lock_guard<std::mutex> guard(g_all_mem_event_lists_mutex);
+  std::vector<std::vector<MemEvent>> result;
+  for (auto &it : g_all_mem_event_lists) {
+    result.emplace_back((*it).Reduce());
+  }
+  return result;
+}
+
 // The information of each event given in the profiling report
 struct EventItem {
   std::string name;
@@ -263,8 +319,8 @@ struct EventItem {
 };

 // Print results
-void PrintProfiler(const std::vector<std::vector<EventItem>>& events_table,
-                   const std::string& sorted_domain, const size_t name_width,
+void PrintProfiler(const std::vector<std::vector<EventItem>> &events_table,
+                   const std::string &sorted_domain, const size_t name_width,
                    const size_t data_width, bool merge_thread) {
   // Output header information
   std::cout << "\n------------------------->"
@@ -302,7 +358,7 @@ void PrintProfiler(const std::vector<std::vector<EventItem>>& events_table,
             << std::setw(data_width) << "Ratio." << std::endl;
   for (size_t i = 0; i < events_table.size(); ++i) {
     for (size_t j = 0; j < events_table[i].size(); ++j) {
-      const EventItem& event_item = events_table[i][j];
+      const EventItem &event_item = events_table[i][j];
       std::cout << std::setw(name_width) << event_item.name
                 << std::setw(data_width) << event_item.calls
                 << std::setw(data_width) << event_item.total_time;
@@ -326,54 +382,54 @@ void PrintProfiler(const std::vector<std::vector<EventItem>>& events_table,
 }

 // Parse the event list and output the profiling report
-void ParseEvents(const std::vector<std::vector<Event>>& events,
+void ParseEvents(const std::vector<std::vector<Event>> &events,
                  bool merge_thread,
                  EventSortingKey sorted_by = EventSortingKey::kDefault) {
   if (g_state == ProfilerState::kDisabled) return;
   if (merge_thread && events.size() < 2) return;

   std::string sorted_domain;
-  std::function<bool(const EventItem&, const EventItem&)> sorted_func;
+  std::function<bool(const EventItem &, const EventItem &)> sorted_func;
   switch (sorted_by) {
     case EventSortingKey::kCalls:
       sorted_domain = "number of calls";
-      sorted_func = [](const EventItem& a, const EventItem& b) {
+      sorted_func = [](const EventItem &a, const EventItem &b) {
         return a.calls > b.calls;
       };
       break;
     case EventSortingKey::kTotal:
       sorted_domain = "total time";
-      sorted_func = [](const EventItem& a, const EventItem& b) {
+      sorted_func = [](const EventItem &a, const EventItem &b) {
         return a.total_time > b.total_time;
       };
       break;
     case EventSortingKey::kMin:
       sorted_domain = "minimum time";
-      sorted_func = [](const EventItem& a, const EventItem& b) {
+      sorted_func = [](const EventItem &a, const EventItem &b) {
         return a.min_time > b.min_time;
       };
       break;
     case EventSortingKey::kMax:
       sorted_domain = "maximum time";
-      sorted_func = [](const EventItem& a, const EventItem& b) {
+      sorted_func = [](const EventItem &a, const EventItem &b) {
        return a.max_time > b.max_time;
       };
       break;
     case EventSortingKey::kAve:
       sorted_domain = "average time";
-      sorted_func = [](const EventItem& a, const EventItem& b) {
+      sorted_func = [](const EventItem &a, const EventItem &b) {
         return a.ave_time > b.ave_time;
       };
       break;
     case EventSortingKey::kGPUTime:
       sorted_domain = "average time";
-      sorted_func = [](const EventItem& a, const EventItem& b) {
+      sorted_func = [](const EventItem &a, const EventItem &b) {
         return a.gpu_time > b.gpu_time;
       };
       break;
     case EventSortingKey::kCPUTime:
       sorted_domain = "average time";
-      sorted_func = [](const EventItem& a, const EventItem& b) {
+      sorted_func = [](const EventItem &a, const EventItem &b) {
         return a.cpu_time > b.cpu_time;
       };
       break;
@@ -381,7 +437,7 @@ void ParseEvents(const std::vector<std::vector<Event>>& events,
       sorted_domain = "event first end time";
   }

-  const std::vector<std::vector<Event>>* analyze_events;
+  const std::vector<std::vector<Event>> *analyze_events;
   std::vector<std::vector<Event>> merged_events_list;
   if (merge_thread) {
     std::vector<Event> merged_events;
@@ -469,7 +525,7 @@ void ParseEvents(const std::vector<std::vector<Event>>& events,
     }
   }
   // average time
-  for (auto& item : event_items) {
+  for (auto &item : event_items) {
     item.ave_time = item.total_time / item.calls;
     item.ratio = item.total_time / total;
   }
@@ -493,15 +549,77 @@ void ParseEvents(const std::vector<std::vector<Event>>& events,
                 merge_thread);
 }

+struct MemoryProfierReport {
+  size_t alloc_times{0};
+  size_t alloc_size{0};
+  size_t free_times{0};
+  size_t free_size{0};
+};
+
+// Print results
+void PrintMemProfiler(
+    const std::map<Place, std::unordered_map<std::string, MemoryProfierReport>>
+        &annotation_report,
+    const size_t name_width, const size_t data_width) {
+  // Output header information
+  std::cout << "\n------------------------->"
+            << " Memory Profiling Report "
+            << "<-------------------------\n\n";
+
+  // Output events table
+  std::cout.setf(std::ios::left);
+  std::cout << std::setw(name_width) << "Event" << std::setw(data_width)
+            << "Alloc Calls" << std::setw(data_width) << "Size(MB)"
+            << std::setw(data_width) << "Free Calls" << std::setw(data_width)
+            << "Size(MB)" << std::endl;
+
+  for (auto &tmp : annotation_report) {
+    for (auto &e : tmp.second) {
+      auto event_name = string::Sprintf("%s:%s", tmp.first, e.first);
+      std::cout << std::setw(name_width) << event_name;
+      std::cout << std::setw(data_width) << e.second.alloc_times;
+      std::cout << std::setw(data_width)
+                << e.second.alloc_size / (1024.0 * 1024.0);
+      std::cout << std::setw(data_width) << e.second.free_times;
+      std::cout << std::setw(data_width)
+                << e.second.free_size / (1024.0 * 1024.0) << std::endl;
+    }
+  }
+  std::cout << std::endl;
+}
+
+// parse memory events
+void ParseMemEvents(const std::vector<std::vector<MemEvent>> &events) {
+  if (g_state == ProfilerState::kDisabled) return;
+  // place, annotation, alloc times, alloc size
+  std::map<Place, std::unordered_map<std::string, MemoryProfierReport>>
+      annotation_report;
+
+  for (auto &tmp : events) {
+    for (auto &e : tmp) {
+      if (e.type() == EventType::kPushRange) {
+        annotation_report[e.place()][e.annotation()].alloc_times += 1;
+        annotation_report[e.place()][e.annotation()].alloc_size += e.bytes();
+      } else if (e.type() == EventType::kPopRange) {
+        annotation_report[e.place()][e.annotation()].free_times += 1;
+        annotation_report[e.place()][e.annotation()].free_size += e.bytes();
+      }
+    }
+  }
+  PrintMemProfiler(annotation_report, 55, 18);
+}
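
ParseMemEvents folds the raw push/pop stream into one report row per (place, annotation) pair: a push counts as an allocation, a pop as a free. The same fold, reduced to a single string key, in a self-contained sketch with made-up data:

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    enum class Kind { kAlloc, kFree };

    struct MemSample {
      Kind kind;
      std::string annotation;
      size_t bytes;
    };

    struct Row {
      size_t alloc_times = 0, alloc_size = 0;
      size_t free_times = 0, free_size = 0;
    };

    int main() {
      std::vector<MemSample> samples = {
          {Kind::kAlloc, "conv2d", 4096}, {Kind::kAlloc, "conv2d", 4096},
          {Kind::kFree, "conv2d", 4096},  {Kind::kAlloc, "fc", 512},
      };
      std::map<std::string, Row> report;
      for (const auto& s : samples) {
        auto& row = report[s.annotation];  // one row per annotation
        if (s.kind == Kind::kAlloc) {
          ++row.alloc_times;
          row.alloc_size += s.bytes;
        } else {
          ++row.free_times;
          row.free_size += s.bytes;
        }
      }
      for (const auto& kv : report)
        std::cout << kv.first << ": " << kv.second.alloc_times << " allocs, "
                  << kv.second.free_times << " frees\n";
    }
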
Mark("_stop_profiler_"); - DeviceTracer* tracer = GetDeviceTracer(); + DeviceTracer *tracer = GetDeviceTracer(); if (tracer->IsEnabled()) { tracer->Disable(); tracer->GenProfile(profile_path); @@ -511,6 +629,11 @@ void DisableProfiler(EventSortingKey sorted_key, std::vector> all_events = GetAllEvents(); ParseEvents(all_events, true, sorted_key); ParseEvents(all_events, false, sorted_key); + if (VLOG_IS_ON(5)) { + std::vector> all_mem_events = GetMemEvents(); + ParseMemEvents(all_mem_events); + } + ResetProfiler(); g_state = ProfilerState::kDisabled; should_send_profile_state = true; diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index aec0ae34292d62905de0e1f459b2b6db4554ebb7..8d11855b70de824159f19f2997b876564e7719b1 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -15,10 +15,17 @@ limitations under the License. */ #pragma once #include #include +#include +#include +#include // NOLINT #include +#include +#include +#include #include #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/event.h" +#include "paddle/fluid/platform/place.h" #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/gpu_info.h" #endif @@ -34,8 +41,41 @@ enum ProfilerState { void Mark(const std::string& name); -Event* PushEvent(const std::string& name); +void PushMemEvent(uint64_t start_ns, uint64_t end_ns, size_t bytes, + const Place& place); +void PopMemEvent(uint64_t start_ns, uint64_t end_ns, size_t bytes, + const Place& place); + +struct MemEvenRecorder { + public: + void PushMemRecord(const void* ptr, const Place& place, size_t size); + void PopMemRecord(const void* ptr, const Place& place); + void Flush(); + static MemEvenRecorder& Instance() { return recorder; } + private: + struct RecordMemEvent { + RecordMemEvent(const Place& place, size_t bytes); + ~RecordMemEvent(); + + Place place_; + size_t bytes_; + uint64_t start_ns_; + uint64_t end_ns_; + std::string alloc_in_; + std::string free_in_; + }; + + static MemEvenRecorder recorder; + std::map>> + address_memevent_; + std::mutex mtx_; + MemEvenRecorder() {} + DISABLE_COPY_AND_ASSIGN(MemEvenRecorder); +}; + +Event* PushEvent(const std::string& name); void PopEvent(const std::string& name); struct RecordEvent { @@ -87,6 +127,41 @@ enum EventSortingKey { kGPUTime }; +template +struct EventList { + constexpr static size_t kMB = 1024 * 1024; + constexpr static size_t kEventBlockSize = 16 * kMB; + constexpr static size_t kEventSize = sizeof(T); + constexpr static size_t kEventAlign = alignof(T); + constexpr static size_t kNumBlock = + kEventBlockSize / + ((kEventSize + kEventAlign - 1) / kEventAlign * kEventAlign); + + template + T* Record(Args&&... args) { + if (event_blocks.empty() || event_blocks.front().size() == kNumBlock) { + event_blocks.emplace_front(); + event_blocks.front().reserve(kNumBlock); + } + event_blocks.front().emplace_back(std::forward(args)...); + return &event_blocks.front().back(); + } + + std::vector Reduce() { + std::vector result; + for (auto& block : event_blocks) { + result.insert(result.begin(), std::make_move_iterator(block.begin()), + std::make_move_iterator(block.end())); + } + event_blocks.clear(); + return result; + } + + void Clear() { event_blocks.clear(); } + + std::forward_list> event_blocks; +}; + // Enable the profiling function. 
+
 // Enable the profiling function.
 void EnableProfiler(ProfilerState state);
diff --git a/paddle/fluid/platform/profiler.proto b/paddle/fluid/platform/profiler.proto
index e761d7b266e92fd5d47b5b6073ffc8bea1dc877d..cfa3c6906f83f750c8d6dc654f29b8fe95ec17ac 100644
--- a/paddle/fluid/platform/profiler.proto
+++ b/paddle/fluid/platform/profiler.proto
@@ -34,8 +34,25 @@ message Event {
   optional string detail_info = 9;
 }

+message MemEvent {
+  enum Place {
+    CUDAPlace = 0;
+    CPUPlace = 1;
+    CUDAPinnedPlace = 2;
+  }
+  optional uint64 start_ns = 1;
+  optional uint64 end_ns = 2;
+  optional uint64 bytes = 3;
+  optional Place place = 4;
+  optional uint64 thread_id = 5;
+  optional uint32 device_id = 6;
+  optional string alloc_in = 7;
+  optional string free_in = 8;
+}
+
 message Profile {
   repeated Event events = 1;
   optional uint64 start_ns = 2;
   optional uint64 end_ns = 3;
+  repeated MemEvent mem_events = 4;
 }
\ No newline at end of file
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 395093a1f5a60f3f978970d8a7f90416baafff4c..552a5e0c3289b022041c6ea4f26694ed24aa858d 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -94,6 +94,14 @@ bool IsCompiledWithMKLDNN() {
 #endif
 }

+bool IsCompiledWithNGRAPH() {
+#ifndef PADDLE_WITH_NGRAPH
+  return false;
+#else
+  return true;
+#endif
+}
+
 bool IsCompiledWithBrpc() {
 #ifndef PADDLE_WITH_DISTRIBUTE
   return false;
@@ -874,6 +882,7 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("init_devices",
         [](bool init_p2p) { framework::InitDevices(init_p2p); });

+  m.def("is_compiled_with_ngraph", IsCompiledWithNGRAPH);
   m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
   m.def("is_compiled_with_mkldnn", IsCompiledWithMKLDNN);
   m.def("is_compiled_with_brpc", IsCompiledWithBrpc);
@@ -1242,7 +1251,7 @@ All parameter, weight, gradient are variables in Paddle.
                 cannot be updated after being finalized.)DOC");

   pe.def(py::init<const std::vector<platform::Place> &,
-                  const std::unordered_set<std::string> &, const std::string &,
+                  const std::vector<std::string> &, const std::string &,
                   Scope *, std::vector<Scope *> &, const ExecutionStrategy &,
                   const BuildStrategy &, ir::Graph *>())
       // NOTE: even we return a vec<Scope*>* to Python use reference policy.
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index 9899eee8841147a509b7997fd905a1b68bc098da..dc0a6dcdedaf39c0c489bb6f6e3eb28e6b58a21d 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -455,7 +455,11 @@ function assert_api_spec_approvals() {
       # NOTE: per_page=10000 should be ok for all cases, a PR review > 10000 is not human readable.
if [ "$API_FILE" == "paddle/fluid/API.spec" ];then APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \ - python ${PADDLE_ROOT}/tools/check_pr_approval.py 2 2887803 35982308` + python ${PADDLE_ROOT}/tools/check_pr_approval.py 2 2887803 35982308 46782768 30176695` + if [ "${APPROVALS}" == "TRUE" ];then + APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \ + python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 35982308` + fi else APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \ python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 2887803` @@ -463,7 +467,7 @@ function assert_api_spec_approvals() { echo "current pr ${GIT_PR_ID} got approvals: ${APPROVALS}" if [ "${APPROVALS}" == "FALSE" ]; then if [ "$API_FILE" == "paddle/fluid/API.spec" ];then - echo "You must have panyx0718 and shanyi15 approval for the api change! ${API_FILE}" + echo "You must have one RD (panyx0718 or chengduoZH or XiaoguangHu01) and one PM (shanyi15) approval for the api change! ${API_FILE}" else echo "You must have panyx0718 approval for the api change! ${API_FILE}" fi diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 103c4d3dd067abfc5e7ab75c92aa124f39b1972e..dd35c6deafcae4c90a5a102cf06c0e05cddcfcd2 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -125,7 +125,7 @@ def __bootstrap__(): os.environ['OMP_NUM_THREADS'] = str(num_threads) sysstr = platform.system() read_env_flags = [ - 'check_nan_inf', 'benchmark', 'eager_delete_scope', 'use_ngraph', + 'check_nan_inf', 'benchmark', 'eager_delete_scope', 'initial_cpu_memory_in_mb', 'init_allocated_mem', 'free_idle_memory', 'paddle_num_threads', "dist_threadpool_size", 'eager_delete_tensor_gb', 'fast_eager_deletion_mode', 'memory_fraction_of_eager_deletion', @@ -143,6 +143,9 @@ def __bootstrap__(): if core.is_compiled_with_mkldnn(): read_env_flags.append('use_mkldnn') + if core.is_compiled_with_ngraph(): + read_env_flags.append('use_ngraph') + if core.is_compiled_with_dist(): read_env_flags.append('rpc_deadline') read_env_flags.append('rpc_server_profile_path') diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index 8f60f6f8b54f799b2495f92ac5b1914ed68387f7..2bf30e39d35e53cc3c61ec538d0d65ec1315b0f0 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -230,13 +230,17 @@ class CompiledProgram(object): self._persistable_vars.append(cpt.to_text(node.name())) places = list(map(_place_obj, self._places)) - - return core.ParallelExecutor(places, - set(self._persistable_vars), - cpt.to_text(self._loss_name) - if self._loss_name else six.u(''), scope, - self._local_scopes, self._exec_strategy, - self._build_strategy, self._graph) + # ParallelExecutor would broadcast all the parameters during initializing. + # The parameters of each process should be in the same ordered for the data-parallelism + # distributed training to keep the broadcast correct. 
+        self._persistable_vars = list(set(self._persistable_vars))
+        self._persistable_vars.sort()
+
+        return core.ParallelExecutor(
+            places, self._persistable_vars,
+            cpt.to_text(self._loss_name)
+            if self._loss_name else six.u(''), self._scope, self._local_scopes,
+            self._exec_strategy, self._build_strategy, self._graph)

     def _compile_inference(self):
         return core.create_paddle_predictor(self._infer_config)
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 4381727a090bdb1d13fb692e64e8d6fb69bba0d7..f018bb8af8cc9f7ed965c86d5aff40352014c393 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -23,6 +23,7 @@ __activations_noattr__ = [
     'logsigmoid',
     'exp',
     'tanh',
+    'atan',
     'tanh_shrink',
     'softshrink',
     'sqrt',
@@ -30,6 +31,8 @@ __activations_noattr__ = [
     'ceil',
     'floor',
     'cos',
+    'acos',
+    'asin',
     'sin',
     'round',
     'reciprocal',
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index d5a838540994abcd1407fd258e723218670bfb58..d587715d607c6da16da5c009db16322e8cd7d176 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -100,6 +100,23 @@ class TestTanh(TestActivation):
         self.check_grad(['X'], 'Out', max_relative_error=0.007)


+class TestAtan(TestActivation):
+    def setUp(self):
+        self.op_type = "atan"
+        self.init_dtype()
+
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
+        out = np.arctan(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
+
+
 class TestTanhShrink(TestActivation):
     def setUp(self):
         self.op_type = "tanh_shrink"
@@ -248,6 +265,23 @@ class TestCos(TestActivation):
         self.check_grad(['X'], 'Out', max_relative_error=0.007)


+class TestAcos(TestActivation):
+    def setUp(self):
+        self.op_type = "acos"
+        self.init_dtype()
+
+        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        out = np.arccos(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
+
+
 class TestSin(TestActivation):
     def setUp(self):
         self.op_type = "sin"
@@ -265,6 +299,23 @@ class TestSin(TestActivation):
         self.check_grad(['X'], 'Out', max_relative_error=0.007)


+class TestAsin(TestActivation):
+    def setUp(self):
+        self.op_type = "asin"
+        self.init_dtype()
+
+        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        out = np.arcsin(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
+
+
 class TestRound(TestActivation):
     def setUp(self):
         self.op_type = "round"
@@ -665,7 +716,10 @@ create_test_act_fp16_class(TestAbs)
 create_test_act_fp16_class(TestCeil, grad_check=False)
 create_test_act_fp16_class(TestFloor, grad_check=False)
 create_test_act_fp16_class(TestCos, grad_atol=0.85)
+create_test_act_fp16_class(TestAcos, grad_atol=0.85)
 create_test_act_fp16_class(TestSin)
+create_test_act_fp16_class(TestAsin)
+create_test_act_fp16_class(TestAtan)
 create_test_act_fp16_class(TestRound, grad_check=False)
 create_test_act_fp16_class(TestRelu)
 create_test_act_fp16_class(TestGelu)
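
The gradient checks above compare against the analytic derivatives d/dx atan(x) = 1/(1+x^2), d/dx asin(x) = 1/sqrt(1-x^2), and d/dx acos(x) = -1/sqrt(1-x^2). A quick finite-difference sanity check of the first identity, as plain C++ unrelated to the Paddle test harness:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double x = 0.5, h = 1e-6;
      // Central difference approximates the derivative to O(h^2).
      double numeric = (std::atan(x + h) - std::atan(x - h)) / (2 * h);
      double analytic = 1.0 / (1.0 + x * x);
      std::printf("numeric=%.8f analytic=%.8f rel_err=%.2e\n", numeric,
                  analytic, std::fabs(numeric - analytic) / analytic);
    }
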
diff --git a/tools/print_signatures.py b/tools/print_signatures.py
index c56f30f724ca9f183d6c5cac427411b7711739a4..d32b247342cc0c37b7bcff7b676cb47a4f429dfd 100644
--- a/tools/print_signatures.py
+++ b/tools/print_signatures.py
@@ -51,6 +51,8 @@ def visit_member(parent_name, member):
             all = (args, doc)
         member_dict[cur_name] = all
     except TypeError:  # special for PyBind method
+        if cur_name in check_modules_list:
+            return
         member_dict[cur_name] = "  ".join([
             line.strip() for line in pydoc.render_doc(member).split('\n')
             if "->" in line
@@ -78,6 +80,7 @@ def visit_all_module(mod):
         visit_member(mod.__name__, instance)


+check_modules_list = ["paddle.reader.ComposeNotAligned.__init__"]
 modules = sys.argv[1].split(",")
 for m in modules:
     visit_all_module(importlib.import_module(m))
diff --git a/tools/timeline.py b/tools/timeline.py
index 78796664177ffa404f757dbdd34ff8c2b0ce2a93..694ab1d50fd78da7f873d3a31b0607d9fedf8dcf 100644
--- a/tools/timeline.py
+++ b/tools/timeline.py
@@ -95,6 +95,21 @@ class _ChromeTraceFormatter(object):
             event['args'] = args
         self._events.append(event)

+    def emit_counter(self, category, name, pid, timestamp, counter, value):
+        """Emits a record for a single counter.
+
+        Args:
+            category: The event category as string
+            name: The event name as string
+            pid: Identifier of the process generating this event as integer
+            timestamp: The timestamp of this event as long integer
+            counter: Name of the counter as string
+            value: Value of the counter as integer
+        """
+        event = self._create_event('C', category, name, pid, 0, timestamp)
+        event['args'] = {counter: value}
+        self._events.append(event)
+
     def format_to_string(self, pretty=False):
         """Formats the chrome trace to a string.

@@ -117,6 +133,7 @@ class Timeline(object):
         self._profile_dict = profile_dict
         self._pid = 0
         self._devices = dict()
+        self._mem_devices = dict()
         self._chrome_trace = _ChromeTraceFormatter()

     def _allocate_pid(self):
@@ -143,6 +160,45 @@ class Timeline(object):
                     self._devices[(k, event.device_id, "GPUKernel")] = pid
                     self._chrome_trace.emit_pid("%s:gpu:%d" %
                                                 (k, event.device_id), pid)
+            for mevent in profile_pb.mem_events:
+                if mevent.place == profiler_pb2.MemEvent.CUDAPlace:
+                    if (k, mevent.device_id, "GPU") not in self._mem_devices:
+                        pid = self._allocate_pid()
+                        self._mem_devices[(k, mevent.device_id, "GPU")] = pid
+                        self._chrome_trace.emit_pid(
+                            "memory usage on %s:gpu:%d" % (k, mevent.device_id),
+                            pid)
+                elif mevent.place == profiler_pb2.MemEvent.CPUPlace:
+                    if (k, mevent.device_id, "CPU") not in self._mem_devices:
+                        pid = self._allocate_pid()
+                        self._mem_devices[(k, mevent.device_id, "CPU")] = pid
+                        self._chrome_trace.emit_pid(
+                            "memory usage on %s:cpu:%d" % (k, mevent.device_id),
+                            pid)
+                elif mevent.place == profiler_pb2.MemEvent.CUDAPinnedPlace:
+                    if (k, mevent.device_id, "CUDAPinnedPlace"
+                        ) not in self._mem_devices:
+                        pid = self._allocate_pid()
+                        self._mem_devices[(k, mevent.device_id,
+                                           "CUDAPinnedPlace")] = pid
+                        self._chrome_trace.emit_pid(
+                            "memory usage on %s:cudapinnedplace:%d" %
+                            (k, mevent.device_id), pid)
+            if (k, 0, "CPU") not in self._mem_devices:
+                pid = self._allocate_pid()
+                self._mem_devices[(k, 0, "CPU")] = pid
+                self._chrome_trace.emit_pid("memory usage on %s:cpu:%d" %
+                                            (k, 0), pid)
+            if (k, 0, "GPU") not in self._mem_devices:
+                pid = self._allocate_pid()
+                self._mem_devices[(k, 0, "GPU")] = pid
+                self._chrome_trace.emit_pid("memory usage on %s:gpu:%d" %
+                                            (k, 0), pid)
+            if (k, 0, "CUDAPinnedPlace") not in self._mem_devices:
+                pid = self._allocate_pid()
+                self._mem_devices[(k, 0, "CUDAPinnedPlace")] = pid
+                self._chrome_trace.emit_pid(
+                    "memory usage on %s:cudapinnedplace:%d" % (k, 0), pid)

     def _allocate_events(self):
         for k, profile_pb in six.iteritems(self._profile_dict):
@@ -163,9 +219,57 @@ class Timeline(object):
                     event.start_ns, (event.end_ns - event.start_ns) / 1.0, pid,
                     event.sub_device_id, 'Op', event.name, args)

+    def _allocate_memory_event(self):
+        place_to_str = {
+            profiler_pb2.MemEvent.CPUPlace: "CPU",
+            profiler_pb2.MemEvent.CUDAPlace: "GPU",
+            profiler_pb2.MemEvent.CUDAPinnedPlace: "CUDAPinnedPlace"
+        }
+        for k, profile_pb in six.iteritems(self._profile_dict):
+            mem_list = []
+            end_profiler = 0
+            for mevent in profile_pb.mem_events:
+                crt_info = dict()
+                crt_info['time'] = mevent.start_ns
+                crt_info['size'] = mevent.bytes
+                if mevent.place in place_to_str:
+                    place = place_to_str[mevent.place]
+                else:
+                    place = "UnDefine"
+                crt_info['place'] = place
+                pid = self._mem_devices[(k, mevent.device_id, place)]
+                crt_info['pid'] = pid
+                crt_info['thread_id'] = mevent.thread_id
+                crt_info['device_id'] = mevent.device_id
+                mem_list.append(crt_info)
+                crt_info = dict()
+                crt_info['place'] = place
+                crt_info['pid'] = pid
+                crt_info['thread_id'] = mevent.thread_id
+                crt_info['device_id'] = mevent.device_id
+                crt_info['time'] = mevent.end_ns
+                crt_info['size'] = -mevent.bytes
+                mem_list.append(crt_info)
+                end_profiler = max(end_profiler, crt_info['time'])
+            mem_list.sort(key=lambda tmp: (tmp.get('time', 0)))
+            i = 0
+            total_size = 0
+            while i < len(mem_list):
+                total_size += mem_list[i]['size']
+                while i < len(mem_list) - 1 and mem_list[i]['time'] == mem_list[
+                        i + 1]['time']:
+                    total_size += mem_list[i + 1]['size']
+                    i += 1
+
+                self._chrome_trace.emit_counter(
+                    "Memory", "Memory", mem_list[i]['pid'], mem_list[i]['time'],
+                    0, total_size)
+                i += 1
+
     def generate_chrome_trace(self):
         self._allocate_pids()
         self._allocate_events()
+        self._allocate_memory_event()
         return self._chrome_trace.format_to_string()
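
The sweep at the end of _allocate_memory_event merges every sample that shares a timestamp before emitting one counter value, so an alloc/free pair at the same instant cannot produce a spurious spike in the trace. The same sweep, as a self-contained C++ sketch over made-up data:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
      // (time_ns, signed byte delta): +bytes at alloc time, -bytes at free time.
      std::vector<std::pair<uint64_t, int64_t>> deltas = {
          {10, +4096}, {20, +512}, {20, -4096}, {30, -512}};
      std::sort(deltas.begin(), deltas.end());

      int64_t total = 0;
      size_t i = 0;
      while (i < deltas.size()) {
        total += deltas[i].second;
        // Fold every delta that shares this timestamp into one sample.
        while (i + 1 < deltas.size() && deltas[i].first == deltas[i + 1].first) {
          total += deltas[i + 1].second;
          ++i;
        }
        std::cout << "t=" << deltas[i].first << " mem=" << total << "\n";
        ++i;
      }
      // Output: t=10 mem=4096, then t=20 mem=512, then t=30 mem=0
    }
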