From 376c2f01fc493418a4185e0ec520602c35cb8546 Mon Sep 17 00:00:00 2001
From: phlrain
Date: Tue, 11 May 2021 11:54:33 +0000
Subject: [PATCH] add default attr; test=develop

---
 paddle/fluid/framework/attribute.h           | 14 +++++++++++++-
 paddle/fluid/framework/hogwild_worker.cc     |  6 +++++-
 paddle/fluid/framework/multi_trainer.cc      |  1 +
 paddle/fluid/framework/op_proto_maker.cc     |  1 +
 paddle/fluid/imperative/CMakeLists.txt       |  2 ++
 paddle/fluid/imperative/execution_context.h  | 19 ++++++++++++++-----
 paddle/fluid/imperative/prepared_operator.cc |  7 +++++--
 paddle/fluid/imperative/tests/test_layer.cc  |  2 +-
 paddle/fluid/imperative/tracer.cc            |  5 +++++
 .../operators/elementwise/elementwise_op.h   |  2 ++
 .../fluid/operators/sync_batch_norm_op.cu.h  |  4 +++-
 paddle/fluid/pybind/op_function.h            |  2 +-
 12 files changed, 53 insertions(+), 12 deletions(-)

diff --git a/paddle/fluid/framework/attribute.h b/paddle/fluid/framework/attribute.h
index 66b988ee1f..079ef7e2a2 100644
--- a/paddle/fluid/framework/attribute.h
+++ b/paddle/fluid/framework/attribute.h
@@ -208,7 +208,8 @@ Attribute GetAttrValue(const proto::OpDesc::Attr& attr_desc);
 
 class AttrReader {
  public:
-  explicit AttrReader(const AttributeMap& attrs) : attrs_(attrs) {}
+  explicit AttrReader(const AttributeMap& attrs, const AttributeMap& default_attrs = {})
+      : attrs_(attrs), default_attrs_(default_attrs) {}
 
   template <typename T>
   inline const T& Get(const std::string& name) const {
@@ -224,6 +225,7 @@ class AttrReader {
 
  private:
   const AttributeMap& attrs_;
+  const AttributeMap& default_attrs_;
 };
 
 // check whether a value(attribute) fit a certain limit
@@ -406,6 +408,14 @@ class OpAttrChecker {
     return default_values_map;
   }
 
+  void InitDefaultMap() {
+    for (const auto& checker : attr_checkers_) {
+      checker(&default_values_map_, true);
+    }
+  }
+
+  const AttributeMap& default_attr_map() const { return default_values_map_; }
+
   void RecordExplicitCheckerNum() {
     explicit_checker_num_ = attr_checkers_.size();
   }
@@ -413,6 +423,8 @@ class OpAttrChecker {
 
  private:
   std::vector<AttrChecker> attr_checkers_;
+  AttributeMap default_values_map_;
+
   // in order to improve the efficiency of dynamic graph mode,
   // we divide the attribute into explicit type and implicit type.
   // for explicit attribute, we mean the attribute added in the customized
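
The AttrReader change gives every reader an optional second map to consult: an explicitly set attribute always wins, and the checker-generated default is only a fallback. One caveat worth knowing: the defaulted {} argument binds a temporary to the stored const AttributeMap& member, so a reader constructed through the one-argument form should not outlive the full expression that creates it. A minimal standalone sketch of the lookup order, using stand-in types rather than the real Paddle definitions:

    // Lookup-order sketch: explicit attrs first, then checker defaults.
    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    using Attribute = int;  // stand-in for Paddle's variant type
    using AttributeMap = std::unordered_map<std::string, Attribute>;

    const Attribute& GetWithDefault(const AttributeMap& attrs,
                                    const AttributeMap& default_attrs,
                                    const std::string& name) {
      auto it = attrs.find(name);  // an explicitly set attribute wins
      if (it == attrs.end()) {
        it = default_attrs.find(name);  // fall back to the default map
        if (it == default_attrs.end()) {
          throw std::out_of_range("can not find [" + name + "] in attrs");
        }
      }
      return it->second;
    }

This mirrors the two-map lookup that GetAttr in execution_context.h (below) implements with PADDLE_ENFORCE_NE instead of an exception.
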
diff --git a/paddle/fluid/framework/hogwild_worker.cc b/paddle/fluid/framework/hogwild_worker.cc
index d8639643f2..5de8d0fcdd 100644
--- a/paddle/fluid/framework/hogwild_worker.cc
+++ b/paddle/fluid/framework/hogwild_worker.cc
@@ -194,11 +194,14 @@ void HogwildWorker::TrainFilesWithProfiler() {
 
 void HogwildWorker::TrainFiles() {
   platform::SetNumThreads(1);
-
+
+  std::cerr << "HogwildWorker::TrainFiles begin" << std::endl;
   // how to accumulate fetched values here
   device_reader_->Start();
   int cur_batch;
+  int batch_cnt = 0;
   while ((cur_batch = device_reader_->Next()) > 0) {
+    ++batch_cnt;
     for (auto &op : ops_) {
       bool need_skip = false;
       for (auto t = 0u; t < skip_ops_.size(); ++t) {
@@ -215,6 +218,7 @@ void HogwildWorker::TrainFiles() {
     PrintFetchVars();
     thread_scope_->DropKids();
   }
+  std::cerr << "total batch " << batch_cnt << std::endl;
 #if defined PADDLE_WITH_PSCORE
   if (thread_barrier_) {
     paddle::distributed::Communicator::GetInstance()->BarrierTriggerDecrement();
diff --git a/paddle/fluid/framework/multi_trainer.cc b/paddle/fluid/framework/multi_trainer.cc
index ff8e71b92e..a9e15ee175 100644
--- a/paddle/fluid/framework/multi_trainer.cc
+++ b/paddle/fluid/framework/multi_trainer.cc
@@ -124,6 +124,7 @@ Scope* MultiTrainer::GetWorkerScope(int thread_id) {
 
 void MultiTrainer::Run() {
   VLOG(3) << "Going to run";
+  VLOG(3) << "thread_num: " << thread_num_ << ", debug: " << debug_;
   for (int thidx = 0; thidx < thread_num_; ++thidx) {
     if (!debug_) {
       threads_.push_back(
diff --git a/paddle/fluid/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc
index 0b9fd0a47e..d86b555d9a 100644
--- a/paddle/fluid/framework/op_proto_maker.cc
+++ b/paddle/fluid/framework/op_proto_maker.cc
@@ -66,6 +66,7 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
   op_checker_ = attr_checker;
   Make();
   op_checker_->RecordExplicitCheckerNum();
+  op_checker_->InitDefaultMap();
 
   AddAttr<int>(OpRoleAttrName(), "The role of this operator")
       .InEnum(
diff --git a/paddle/fluid/imperative/CMakeLists.txt b/paddle/fluid/imperative/CMakeLists.txt
index a24c0ac09c..5cfa2c2278 100644
--- a/paddle/fluid/imperative/CMakeLists.txt
+++ b/paddle/fluid/imperative/CMakeLists.txt
@@ -28,4 +28,6 @@ endif(NOT WIN32)
 
 cc_library(gradient_accumulator SRCS gradient_accumulator.cc DEPS blas operator lod_tensor selected_rows selected_rows_functor var_type_traits layer math_function)
 
+cc_binary(tracer_test SRCS tracer_test.cc DEPS tracer layer op_registry python pybind ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} profiler)
+
 add_subdirectory(tests)
diff --git a/paddle/fluid/imperative/execution_context.h b/paddle/fluid/imperative/execution_context.h
index 398b1292e2..885c5d5ed0 100644
--- a/paddle/fluid/imperative/execution_context.h
+++ b/paddle/fluid/imperative/execution_context.h
@@ -35,11 +35,13 @@ class DygraphExecutionContext : public framework::ExecutionContext {
                           const framework::RuntimeContext& ctx,
                           const NameVarMap<VarType>& var_base_map_in,
                           const NameVarMap<VarType>& var_base_map_out,
-                          const framework::AttributeMap& attrs)
+                          const framework::AttributeMap& attrs,
+                          const framework::AttributeMap& default_attrs)
       : ExecutionContext(op, scope, device_context, ctx),
         var_base_map_in_(var_base_map_in),
         var_base_map_out_(var_base_map_out),
-        attrs_(attrs) {}
+        attrs_(attrs),
+        default_attrs_(default_attrs) {}
 
   std::string InputName(const std::string& name) const override {
     auto it = var_base_map_in_.find(name);
@@ -92,16 +94,22 @@ class DygraphExecutionContext : public framework::ExecutionContext {
   }
 
   bool HasAttr(const std::string& name) const override {
-    return attrs_.count(name) != 0;
+    return attrs_.count(name) != 0 || default_attrs_.count(name) != 0;
   }
 
   const framework::AttributeMap& Attrs() const override { return attrs_; }
 
   const framework::Attribute& GetAttr(const std::string& name) const override {
     auto it = attrs_.find(name);
-
+
+    bool found = (it != attrs_.end());
+    if (it == attrs_.end()) {
+      it = default_attrs_.find(name);
+      found = (it != default_attrs_.end());
+    }
+
     PADDLE_ENFORCE_NE(
-        it, attrs_.end(),
+        found, false,
         platform::errors::NotFound("can not find [%s] in attrs", name));
 
     return it->second;
@@ -192,6 +200,7 @@ class DygraphExecutionContext : public framework::ExecutionContext {
   const NameVarMap<VarType>& var_base_map_in_;
   const NameVarMap<VarType>& var_base_map_out_;
   const framework::AttributeMap& attrs_;
+  const framework::AttributeMap& default_attrs_;
 };
 
 }  // namespace imperative
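
For context on the op_proto_maker.cc and attribute.h hunks above: InitDefaultMap materializes every attribute's default value once, at operator registration, so dynamic-graph tracing no longer has to re-run the checkers per traced op just to obtain defaults. A compact sketch of that registration-time pattern, with stand-in types; in this sketch the boolean argument simply tells a checker to deposit its default, while the real Paddle checker signature carries more nuanced semantics:

    // One-time construction of the default-attribute map at registration.
    #include <functional>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    using Attribute = int;
    using AttributeMap = std::unordered_map<std::string, Attribute>;
    // A checker either validates an attribute map or, when the flag is
    // true, inserts its default value into the map.
    using AttrChecker = std::function<void(AttributeMap*, bool)>;

    class OpAttrChecker {
     public:
      void AddChecker(AttrChecker checker) {
        attr_checkers_.push_back(std::move(checker));
      }

      // Run once after all AddAttr calls: every checker deposits its
      // default value into default_values_map_.
      void InitDefaultMap() {
        for (const auto& checker : attr_checkers_) {
          checker(&default_values_map_, /*fill_default=*/true);
        }
      }

      const AttributeMap& default_attr_map() const {
        return default_values_map_;
      }

     private:
      std::vector<AttrChecker> attr_checkers_;
      AttributeMap default_values_map_;
    };

A checker registered as, say, [](AttributeMap* m, bool fill) { if (fill && m->count("axis") == 0) (*m)["axis"] = -1; } would deposit axis = -1 into the default map exactly once.
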
diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc
index 2a3b6424d4..5c05a5642a 100644
--- a/paddle/fluid/imperative/prepared_operator.cc
+++ b/paddle/fluid/imperative/prepared_operator.cc
@@ -104,10 +104,10 @@ PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,
   }
 #endif
 
   // 1. get expected kernel key
   auto expected_kernel_key =
       op.GetExpectedKernelType(DygraphExecutionContext<VarType>(
-          op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs));
+          op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs, {}));
   VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
 
   // 2. check if op[type] has kernel registered.
@@ -172,8 +172,11 @@ static void PreparedOpRunImpl(
   static_cast<const framework::OperatorWithKernel&>(op).InferShape(
       &infer_shape_ctx);
 
+  // Assumes every op with a registered kernel also registers an attr checker.
+  auto* attr_checker = op.Info().Checker();
   func(DygraphExecutionContext<VarType>(op, scope, *dev_ctx, ctx, ins, outs,
-                                        attrs));
+                                        attrs,
+                                        attr_checker->default_attr_map()));
 
   /**
    * [ Why need handle complex gradient to real gradient? ]
diff --git a/paddle/fluid/imperative/tests/test_layer.cc b/paddle/fluid/imperative/tests/test_layer.cc
index 4a30ffb7e3..93602ddaa3 100644
--- a/paddle/fluid/imperative/tests/test_layer.cc
+++ b/paddle/fluid/imperative/tests/test_layer.cc
@@ -358,7 +358,7 @@ TEST(test_layer, test_dygraph_execution_context) {
   framework::Scope scope;
 
   DygraphExecutionContext<imperative::VarBase> dy_exe_context(
-      *(op.get()), scope, *dev_ctx, ctx, ins, outs, concat_att_map);
+      *(op.get()), scope, *dev_ctx, ctx, ins, outs, concat_att_map, {});
 
   ASSERT_EQ(dy_exe_context.InputSize("X"), 1u);
   ASSERT_EQ(dy_exe_context.InputName("X"), "vin");
diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index 608cc407d5..c9567a98a7 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -149,11 +149,16 @@ void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
     }
   }
 
   auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
+
+
   const auto& op_info = op->Info();
   auto* attr_checker = op_info.Checker();
+
   if (attr_checker) {
     attr_checker->Check(&attrs, true);
   }
+
+
   NameVarBaseMap new_ins = ins;
   if (enable_autocast_) {
diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index 6ec73b02ad..3cec5388d1 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -109,6 +109,7 @@ class ElementwiseOp : public framework::OperatorWithKernel {
     auto input_data_type =
         OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");
 
+/*
 #ifdef PADDLE_WITH_MKLDNN
     if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
       return framework::OpKernelType(input_data_type, ctx.GetPlace(),
@@ -116,6 +117,7 @@ class ElementwiseOp : public framework::OperatorWithKernel {
                                      framework::DataLayout::kMKLDNN,
                                      framework::LibraryType::kMKLDNN);
     }
 #endif
+*/
 
     return framework::OpKernelType(input_data_type, ctx.GetPlace());
   }
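
The tracer.cc hunk above keeps the existing null test on Info().Checker(), since ops may register no attribute checker, while PreparedOpRunImpl dereferences the checker unconditionally under the assumption noted in its comment. A call site that cannot guarantee a checker exists would want a guarded lookup along the following lines; DefaultAttrsOrEmpty and its static empty map are hypothetical helpers for illustration, not Paddle API:

    // Guarded access to an op's default-attribute map.
    #include <string>
    #include <unordered_map>

    using Attribute = int;
    using AttributeMap = std::unordered_map<std::string, Attribute>;

    struct OpAttrChecker {
      AttributeMap defaults;
      const AttributeMap& default_attr_map() const { return defaults; }
    };

    struct OpInfo {
      const OpAttrChecker* checker = nullptr;  // null for checker-less ops
      const OpAttrChecker* Checker() const { return checker; }
    };

    const AttributeMap& DefaultAttrsOrEmpty(const OpInfo& info) {
      static const AttributeMap kEmpty;  // stable fallback, never dangles
      const auto* checker = info.Checker();
      return checker != nullptr ? checker->default_attr_map() : kEmpty;
    }

Routing the PreparedOpRunImpl call through a guard like this would make the null case explicit instead of relying on the comment's assumption.
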
diff --git a/paddle/fluid/operators/sync_batch_norm_op.cu.h b/paddle/fluid/operators/sync_batch_norm_op.cu.h
index d08a34ade7..e785e89aeb 100644
--- a/paddle/fluid/operators/sync_batch_norm_op.cu.h
+++ b/paddle/fluid/operators/sync_batch_norm_op.cu.h
@@ -186,12 +186,14 @@ void SyncBatchNormFunctor(const framework::ExecutionContext &ctx,
         framework::DataLayout::kNHWC><<<grid, threads, 0, stream>>>(
         x_d, N, H * W * D, C, stats);
   }
-
+
+  /*
   Tensor c_g_st;
   auto *c_g_st_d = c_g_st.mutable_data<BatchNormParamType<T>>(
       {2 * C + 1}, platform::CPUPlace());
   auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
   memory::Copy(platform::CPUPlace(), c_g_st_d, gplace, stats, bytes, 0);
+  */
 
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
   auto *comm = dev_ctx.nccl_comm();
diff --git a/paddle/fluid/pybind/op_function.h b/paddle/fluid/pybind/op_function.h
index 0c45753121..4e5ddac3ec 100644
--- a/paddle/fluid/pybind/op_function.h
+++ b/paddle/fluid/pybind/op_function.h
@@ -177,4 +177,4 @@ static inline void HandleViewBetweenInputAndOutput(
 }  // namespace paddle
 
 // This include must be the last line
-#include "paddle/fluid/pybind/op_function_impl.h"
+#include "paddle/fluid/pybind/op_function_impl_new.h"
-- 
GitLab