diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake index baf253df2755657b01b67c410f63b7d8422d4df3..9fea9ca05bce921b105a0f092c321b0c3a55c63c 100644 --- a/cmake/external/mkldnn.cmake +++ b/cmake/external/mkldnn.cmake @@ -45,7 +45,7 @@ IF(${CBLAS_PROVIDER} STREQUAL "MKLML") ELSE() MESSAGE(FATAL_ERROR "Should enable MKLML when build MKLDNN") ENDIF() -SET(MKLDNN_FLAG "-Wno-error=strict-overflow -Wno-error=unused-result") +SET(MKLDNN_FLAG "-Wno-error=strict-overflow -Wno-error=unused-result -Wno-error=array-bounds") SET(MKLDNN_FLAG "${MKLDNN_FLAG} -Wno-unused-result -Wno-unused-value") SET(MKLDNN_CFLAG "${CMAKE_C_FLAGS} ${MKLDNN_FLAG}") SET(MKLDNN_CXXFLAG "${CMAKE_CXX_FLAGS} ${MKLDNN_FLAG}") @@ -54,7 +54,7 @@ ExternalProject_Add( ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS ${MKLDNN_DEPENDS} GIT_REPOSITORY "https://github.com/01org/mkl-dnn.git" - GIT_TAG "64e03a1939e0d526aa8e9f2e3f7dc0ad8d372944" + GIT_TAG "21fb5f2af1dd14e132af4f1b79160977ee487818" PREFIX ${MKLDNN_SOURCES_DIR} UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index b6b7af951093e4d721e5d0c99e7bb818c67af749..88a2c740e08f7c2c6ac831c65a0ef992064b3a61 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -174,6 +174,7 @@ paddle.fluid.layers.mean ArgSpec(args=['x', 'name'], varargs=None, keywords=None paddle.fluid.layers.mul ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None)) paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=['x', 'label', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.maxout ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.space_to_depth ArgSpec(args=['x', 'blocksize', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.affine_grid ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.sequence_reverse ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.affine_channel ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None)) @@ -189,6 +190,7 @@ paddle.fluid.layers.batch ArgSpec(args=['reader', 'batch_size'], varargs=None, k paddle.fluid.layers.double_buffer ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)) paddle.fluid.layers.random_data_generator ArgSpec(args=['low', 'high', 'shapes', 'lod_levels', 'for_parallel'], varargs=None, keywords=None, defaults=(True,)) paddle.fluid.layers.py_reader ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)) +paddle.fluid.layers.create_py_reader_by_data ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)) paddle.fluid.layers.Preprocessor.__init__ ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.Preprocessor.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) paddle.fluid.layers.Preprocessor.inputs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.h b/paddle/fluid/framework/details/broadcast_op_handle_test.h index 
1a2a9ac328c4a9b89bfb89106af81b9fb3ed3028..4305eb65733a7c871450949ce2c48cab013bac81 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle_test.h +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.h @@ -37,8 +37,9 @@ struct TestBroadcastOpHandle { std::vector local_scopes_; std::vector param_scopes_; Scope g_scope_; - std::unique_ptr op_handle_; - std::vector> vars_; + OpHandleBase* op_handle_; + std::vector vars_; + std::vector> nodes_; std::vector place_list_; bool use_gpu_; #ifdef PADDLE_WITH_CUDA @@ -90,6 +91,7 @@ struct TestBroadcastOpHandle { } void InitBroadcastOp(size_t input_scope_idx) { + nodes_.clear(); for (size_t j = 0; j < place_list_.size(); ++j) { local_scopes_.push_back(&(g_scope_.NewScope())); Scope& local_scope = local_scopes_.back()->NewScope(); @@ -101,39 +103,39 @@ struct TestBroadcastOpHandle { } param_scopes_[input_scope_idx]->Var("input"); - std::unique_ptr n = - ir::CreateNodeForTest("node0", ir::Node::Type::kOperation); + nodes_.emplace_back( + ir::CreateNodeForTest("node0", ir::Node::Type::kOperation)); if (use_gpu_) { #ifdef PADDLE_WITH_CUDA - op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_, - place_list_, nccl_ctxs_.get())); + op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_, + place_list_, nccl_ctxs_.get()); #else PADDLE_THROW("CUDA is not support."); #endif } else { #ifdef PADDLE_WITH_CUDA - op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_, - place_list_, nccl_ctxs_.get())); + op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_, + place_list_, nccl_ctxs_.get()); #else - op_handle_.reset( - new BroadcastOpHandle(n.get(), local_scopes_, place_list_)); + op_handle_ = new BroadcastOpHandle(nodes_.back().get(), local_scopes_, + place_list_); #endif } - std::unique_ptr v = - ir::CreateNodeForTest("node1", ir::Node::Type::kVariable); - auto* in_var_handle = new VarHandle(v.get(), 1, input_scope_idx, "input", - place_list_[input_scope_idx]); + nodes_.emplace_back( + ir::CreateNodeForTest("node1", ir::Node::Type::kVariable)); + auto* in_var_handle = new VarHandle(nodes_.back().get(), 1, input_scope_idx, + "input", place_list_[input_scope_idx]); vars_.emplace_back(in_var_handle); op_handle_->AddInput(in_var_handle); // add dummy var - std::unique_ptr v2 = - ir::CreateNodeForTest("node2", ir::Node::Type::kVariable); - vars_.emplace_back(new DummyVarHandle(v2.get())); + nodes_.emplace_back( + ir::CreateNodeForTest("node2", ir::Node::Type::kVariable)); + vars_.emplace_back(new DummyVarHandle(nodes_.back().get())); DummyVarHandle* dummy_var_handle = - static_cast(vars_.back().get()); + static_cast(vars_.back()); dummy_var_handle->ClearGeneratedOp(); op_handle_->AddInput(dummy_var_handle); @@ -141,20 +143,20 @@ struct TestBroadcastOpHandle { if (!use_gpu_) { op_handle_->SetDeviceContext(place_list_[j], ctxs_[j].get()); } - std::unique_ptr v3 = - ir::CreateNodeForTest("node3", ir::Node::Type::kVariable); + nodes_.emplace_back( + ir::CreateNodeForTest("node3", ir::Node::Type::kVariable)); VarHandle* out_var_handle = - new VarHandle(v3.get(), 2, j, "out", place_list_[j]); + new VarHandle(nodes_.back().get(), 2, j, "out", place_list_[j]); vars_.emplace_back(out_var_handle); op_handle_->AddOutput(out_var_handle); } // add dummy var - std::unique_ptr v4 = - ir::CreateNodeForTest("node4", ir::Node::Type::kVariable); - vars_.emplace_back(new DummyVarHandle(v4.get())); + nodes_.emplace_back( + ir::CreateNodeForTest("node4", ir::Node::Type::kVariable)); + vars_.emplace_back(new 
DummyVarHandle(nodes_.back().get())); DummyVarHandle* out_dummy_var_handle = - static_cast(vars_.back().get()); + static_cast(vars_.back()); out_dummy_var_handle->ClearGeneratedOp(); op_handle_->AddOutput(out_dummy_var_handle); } diff --git a/paddle/fluid/framework/details/execution_strategy.h b/paddle/fluid/framework/details/execution_strategy.h index 5183be878eb49cccc68603c3fdd8023be5578036..15c496130c2b6c7643ff96661be09e5ac4870344 100644 --- a/paddle/fluid/framework/details/execution_strategy.h +++ b/paddle/fluid/framework/details/execution_strategy.h @@ -13,6 +13,7 @@ // limitations under the License. #pragma once +#include // for size_t namespace paddle { namespace framework { @@ -26,6 +27,7 @@ struct ExecutionStrategy { bool allow_op_delay_{false}; size_t num_iteration_per_drop_scope_{100}; ExecutorType type_{kDefault}; + bool dry_run_{false}; }; } // namespace details diff --git a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc index 98fc390e72fab3701538fd6f974460fa5114fdb0..ca11c0083961f9b3d04e33113a0d685d508918f9 100644 --- a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc @@ -16,6 +16,7 @@ #include #include "paddle/fluid/framework/details/fetch_op_handle.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" +#include "paddle/fluid/framework/ir/graph_helper.h" namespace paddle { namespace framework { @@ -32,13 +33,11 @@ FastThreadedSSAGraphExecutor::FastThreadedSSAGraphExecutor( pool_(strategy.num_threads_ + 1), // add one more thread for generate op_deps fetch_ctxs_(places) { - auto &ops = graph_->Get("ops"); - - for (auto &op : ops) { + for (auto &op : ir::FilterByNodeWrapper(*graph_)) { int dep = static_cast(op->NotReadyInputSize()); - op_deps_.emplace(op.get(), dep); + op_deps_.emplace(op, dep); if (dep == 0) { - bootstrap_ops_.emplace_back(op.get()); + bootstrap_ops_.emplace_back(op); } } @@ -54,13 +53,13 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run( paddle::framework::FeedFetchList fetches; fetches.resize(fetch_tensors.size()); std::unordered_map> fetched_vars; - std::vector> fetch_ops; + std::vector fetch_ops; for (auto &fetch_var_name : fetch_tensors) { for (auto &var_map : graph_->Get("vars")) { auto it = var_map.find(fetch_var_name); if (it != var_map.end()) { - fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); + fetched_vars[fetch_var_name].push_back(*it->second.rbegin()); } } } @@ -110,7 +109,10 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run( complete_q->Pop(); } } - exception_.ReThrow(); + if (exception_.IsCaught()) { + ClearFetchOp(graph_.get(), &fetch_ops); + exception_.ReThrow(); + } } num_complete += num_comp; } @@ -128,7 +130,9 @@ void FastThreadedSSAGraphExecutor::RunOpAsync( size_t complete = 0; while (op_to_run != nullptr) { try { - op_to_run->Run(strategy_.use_cuda_); + if (LIKELY(!strategy_.dry_run_)) { + op_to_run->Run(strategy_.use_cuda_); + } ++complete; } catch (...) 
{ exception_.Catch(std::current_exception()); diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index fe18b2060c5cd7e157374da53c5a985f70545ab7..648adae06facb504042d8286f6eab5d98e99c015 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -28,11 +28,7 @@ FetchOpHandle::FetchOpHandle(ir::Node *node, FeedFetchList *data, size_t offset, offset_(offset), local_scopes_(local_scopes) {} -FetchOpHandle::~FetchOpHandle() { - for (auto *input_var : inputs_) { - input_var->RemoveOutput(this, this->Node()); - } -} +FetchOpHandle::~FetchOpHandle() {} void FetchOpHandle::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) { PADDLE_THROW("Nobody should wait FetchOp. Unexpceted Error"); diff --git a/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc index 0f12bd2b4e857648342aeb5ad33b6c0fe01c9c73..541993c74332cc483a8b854a6b8f227c7c9a19a9 100644 --- a/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc +++ b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc @@ -22,8 +22,10 @@ namespace details { struct TestFusedBroadcastOpHandle : TestBroadcastOpHandle { std::vector out_varnames_; + std::vector> nodes_; void InitFusedBroadcastOp(std::vector input_scope_idxes) { + nodes_.clear(); // initialize scope and var for (size_t i = 0; i < place_list_.size(); ++i) { local_scopes_.push_back(&(g_scope_.NewScope())); @@ -39,41 +41,41 @@ struct TestFusedBroadcastOpHandle : TestBroadcastOpHandle { } // create op handle node - std::unique_ptr n = - ir::CreateNodeForTest("fused_broadcast", ir::Node::Type::kOperation); + nodes_.emplace_back( + ir::CreateNodeForTest("fused_broadcast", ir::Node::Type::kOperation)); if (use_gpu_) { #ifdef PADDLE_WITH_CUDA - op_handle_.reset(new FusedBroadcastOpHandle( - n.get(), local_scopes_, place_list_, nccl_ctxs_.get())); + op_handle_ = new FusedBroadcastOpHandle( + nodes_.back().get(), local_scopes_, place_list_, nccl_ctxs_.get()); #else PADDLE_THROW("CUDA is not supported."); #endif } else { #ifdef PADDLE_WITH_CUDA - op_handle_.reset(new FusedBroadcastOpHandle( - n.get(), local_scopes_, place_list_, nccl_ctxs_.get())); + op_handle_ = new FusedBroadcastOpHandle( + nodes_.back().get(), local_scopes_, place_list_, nccl_ctxs_.get()); #else - op_handle_.reset( - new FusedBroadcastOpHandle(n.get(), local_scopes_, place_list_)); + op_handle_ = new FusedBroadcastOpHandle(nodes_.back().get(), + local_scopes_, place_list_); #endif } for (size_t i = 0; i < input_scope_idxes.size(); ++i) { // add input var handle - std::unique_ptr in_node = - ir::CreateNodeForTest("in_node" + i, ir::Node::Type::kVariable); + nodes_.emplace_back( + ir::CreateNodeForTest("in_node" + i, ir::Node::Type::kVariable)); VarHandle* in_var_handle = - new VarHandle(in_node.get(), 1, input_scope_idxes[i], "in_var" + i, - place_list_[input_scope_idxes[i]]); + new VarHandle(nodes_.back().get(), 1, input_scope_idxes[i], + "in_var" + i, place_list_[input_scope_idxes[i]]); vars_.emplace_back(in_var_handle); op_handle_->AddInput(in_var_handle); // add output var handle for (size_t j = 0; j < place_list_.size(); ++j) { - std::unique_ptr out_node = - ir::CreateNodeForTest("out_node" + i, ir::Node::Type::kVariable); - VarHandle* out_var_handle = - new VarHandle(out_node.get(), 2, j, "out_var" + i, place_list_[j]); + nodes_.emplace_back( + ir::CreateNodeForTest("out_node" + i, 
ir::Node::Type::kVariable)); + VarHandle* out_var_handle = new VarHandle( + nodes_.back().get(), 2, j, "out_var" + i, place_list_[j]); vars_.emplace_back(out_var_handle); op_handle_->AddOutput(out_var_handle); } diff --git a/paddle/fluid/framework/details/gather_op_handle_test.cc b/paddle/fluid/framework/details/gather_op_handle_test.cc index ed67e88ff6a7fe9efd93e5dfd4d7bdf4c43aac2e..e8cb7feb8bea92a7486b8a9d84ba4b9e2b93dbfb 100644 --- a/paddle/fluid/framework/details/gather_op_handle_test.cc +++ b/paddle/fluid/framework/details/gather_op_handle_test.cc @@ -31,9 +31,10 @@ struct TestGatherOpHandle { std::vector local_scopes_; std::vector param_scopes_; Scope g_scope_; - std::unique_ptr op_handle_; - std::vector> vars_; + OpHandleBase* op_handle_; + std::vector vars_; std::vector gpu_list_; + std::vector> nodes_; void WaitAll() { for (size_t j = 0; j < ctxs_.size(); ++j) { @@ -70,7 +71,7 @@ struct TestGatherOpHandle { } void InitGatherOp(size_t input_scope_idx) { - std::vector> nodes; + nodes_.clear(); for (size_t j = 0; j < gpu_list_.size(); ++j) { local_scopes_.push_back(&(g_scope_.NewScope())); Scope& local_scope = local_scopes_.back()->NewScope(); @@ -82,44 +83,45 @@ struct TestGatherOpHandle { } param_scopes_[input_scope_idx]->Var("out"); - nodes.emplace_back( + nodes_.emplace_back( ir::CreateNodeForTest("node", ir::Node::Type::kOperation).release()); - op_handle_.reset( - new GatherOpHandle(nodes.back().get(), local_scopes_, gpu_list_)); + op_handle_ = + new GatherOpHandle(nodes_.back().get(), local_scopes_, gpu_list_); // add input for (size_t j = 0; j < gpu_list_.size(); ++j) { op_handle_->SetDeviceContext(gpu_list_[j], ctxs_[j].get()); - nodes.emplace_back( + nodes_.emplace_back( ir::CreateNodeForTest("node1", ir::Node::Type::kVariable).release()); auto* in_var_handle = - new VarHandle(nodes.back().get(), 1, j, "input", gpu_list_[j]); + new VarHandle(nodes_.back().get(), 1, j, "input", gpu_list_[j]); vars_.emplace_back(in_var_handle); op_handle_->AddInput(in_var_handle); } // add dummy var - nodes.emplace_back( + nodes_.emplace_back( ir::CreateNodeForTest("node2", ir::Node::Type::kVariable).release()); - vars_.emplace_back(new DummyVarHandle(nodes.back().get())); + vars_.emplace_back(new DummyVarHandle(nodes_.back().get())); DummyVarHandle* in_dummy_var_handle = - static_cast(vars_.back().get()); + static_cast(vars_.back()); in_dummy_var_handle->ClearGeneratedOp(); op_handle_->AddInput(in_dummy_var_handle); // add output - nodes.emplace_back( + nodes_.emplace_back( ir::CreateNodeForTest("node3", ir::Node::Type::kVariable).release()); - auto* out_var_handle = new VarHandle(nodes.back().get(), 2, input_scope_idx, - "out", gpu_list_[input_scope_idx]); + auto* out_var_handle = + new VarHandle(nodes_.back().get(), 2, input_scope_idx, "out", + gpu_list_[input_scope_idx]); vars_.emplace_back(out_var_handle); op_handle_->AddOutput(out_var_handle); // add dummy var - nodes.emplace_back( + nodes_.emplace_back( ir::CreateNodeForTest("node4", ir::Node::Type::kVariable).release()); - vars_.emplace_back(new DummyVarHandle(nodes.back().get())); + vars_.emplace_back(new DummyVarHandle(nodes_.back().get())); DummyVarHandle* dummy_var_handle = - static_cast(vars_.back().get()); + static_cast(vars_.back()); op_handle_->AddOutput(dummy_var_handle); } diff --git a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc b/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc index 169ce3ae7ca497e40d99b1c16633e35e1e4f1009..67aad9f94f088f4b50e1ce2728d83de98a3c60ad 100644 --- 
a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc +++ b/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc @@ -16,6 +16,7 @@ #include "paddle/fluid/framework/details/computation_op_handle.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" #include "paddle/fluid/framework/details/op_graph_view.h" +#include "paddle/fluid/framework/ir/graph_helper.h" namespace paddle { namespace framework { @@ -35,10 +36,10 @@ static bool IsLockAndRecordEventFreeComputationOpHandle( std::unique_ptr ModifyOpLockAndRecordEventPass::ApplyImpl( std::unique_ptr ir_graph) const { - auto &all_ops = ir_graph->Get(kGraphOps); + auto all_ops = ir::FilterByNodeWrapper(*ir_graph); OpGraphView graph_view(all_ops); for (auto &op : all_ops) { - auto *compute_op = dynamic_cast(op.get()); + auto *compute_op = dynamic_cast(op); if (compute_op == nullptr) continue; bool is_lock_and_record_event_free = IsLockAndRecordEventFreeComputationOpHandle(compute_op, graph_view); diff --git a/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc index c9c255864a2477ed29873f8521acce37fa928c06..c8ea18804630fea4ada98062256730dbf4c24860 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/details/multi_devices_graph_check_pass.h" #include #include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/graph_helper.h" namespace paddle { namespace framework { @@ -36,20 +37,20 @@ bool SSAGraghBuilderWithChecker::IsValidGraph(const ir::Graph *graph) const { for (auto &var_map : graph->Get(kGraphVars)) { for (auto &name_pair : var_map) { for (auto &version_pair : name_pair.second) { - insert_pending_var(version_pair.get()); + insert_pending_var(version_pair); } } } for (auto &var : graph->Get(kGraphDepVars)) { - insert_pending_var(var.get()); + insert_pending_var(var); } - for (auto &op : graph->Get(kGraphOps)) { + for (OpHandleBase *op : ir::FilterByNodeWrapper(*graph)) { if (op->Inputs().empty()) { - ready_ops.insert(op.get()); + ready_ops.insert(op); } else { - pending_ops.insert({op.get(), op.get()->NoDupInputSize()}); + pending_ops.insert({op, op->NoDupInputSize()}); } } @@ -89,6 +90,4 @@ bool SSAGraghBuilderWithChecker::IsValidGraph(const ir::Graph *graph) const { REGISTER_PASS(multi_devices_check_pass, paddle::framework::details::SSAGraghBuilderWithChecker) .RequireGraphAttr(paddle::framework::details::kGraphVars) - .RequireGraphAttr(paddle::framework::details::kGraphDepVars) - .RequireGraphAttr(paddle::framework::details::kGraphOps) - .RequireGraphAttr(paddle::framework::details::kShardedVarDevice); + .RequireGraphAttr(paddle::framework::details::kGraphDepVars); diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc index f3819887a196a7c8bf35897467bb9d68b428094e..3dc177a8cb7a1e994aca5304240f1eb61ba23f02 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc @@ -34,7 +34,14 @@ namespace paddle { namespace framework { namespace details { + namespace { +// TODO(panyx0718): Clean this up as well. +// all operators. NOTE that even we use a vector here, the operators is +// unordered. 
+typedef std::vector GraphOps; +const char kGraphOps[] = "ops"; + void PolishGraphToSupportDataHazards(ir::Graph *graph) { for (auto &var_map : graph->Get(kGraphVars)) { for (auto &name_pair : var_map) { @@ -92,7 +99,7 @@ VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node, } var_holder.emplace_back(var); } else { - var = var_holder.rbegin()->get(); + var = *var_holder.rbegin(); } return var; } @@ -154,7 +161,7 @@ void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result, ir::Node *node, size_t place_id) const { auto p = places_[place_id]; - auto *op_handle = result->Get(kGraphOps).back().get(); + auto *op_handle = result->Get(kGraphOps).back(); op_handle->SetDeviceContext(p, platform::DeviceContextPool::Instance().Get(p)); @@ -303,7 +310,6 @@ std::unique_ptr MultiDevSSAGraphBuilder::ApplyImpl( result.Set(kGraphVars, new GraphVars(places_.size())); result.Set(kGraphDepVars, new GraphDepVars); result.Set(kGraphOps, new GraphOps); - result.Set(kShardedVarDevice, new ShardedVarDevice); // find send/recv vars so that we can place the distributed training // related op in the place 0 @@ -317,11 +323,13 @@ std::unique_ptr MultiDevSSAGraphBuilder::ApplyImpl( bool is_forwarding = true; bool is_dist_train = false; + std::unordered_map sharded_var_device; + for (ir::Node *node : sorted_ops) { if (boost::get( node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) == static_cast(OpRole::kRPC)) { - int op_dev_id = CreateRPCOp(&result, node); + int op_dev_id = CreateRPCOp(&result, node, &sharded_var_device); PADDLE_ENFORCE(op_dev_id != -1, "Can not schedule the RPC operator to the right place."); if (node->Op()->Type() == "recv") { @@ -337,7 +345,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::ApplyImpl( } else if (boost::get(node->Op()->GetAttr( OpProtoAndCheckerMaker::OpRoleAttrName())) == static_cast(OpRole::kDist)) { - int op_dev_id = CreateDistTrainOp(&result, node); + int op_dev_id = CreateDistTrainOp(&result, node, &sharded_var_device); if (node->Op()->Type() == "concat") { auto origin_param_name = node->Op()->OutputArgumentNames()[0]; bcast_var_name_set[op_dev_id].emplace(origin_param_name); @@ -356,12 +364,11 @@ std::unique_ptr MultiDevSSAGraphBuilder::ApplyImpl( // the block. is_forwarding = false; } else { - int op_dev_id = GetOpDeviceID(result, node); + int op_dev_id = GetOpDeviceID(result, node, sharded_var_device); if (op_dev_id != -1) { // This op only runs on one specific device. CreateComputationalOp(&result, node, op_dev_id); for (ir::Node *n : node->outputs) { - graph->Get(kShardedVarDevice) - .emplace(n->Name(), op_dev_id); + sharded_var_device.emplace(n->Name(), op_dev_id); } } else { // This op runs on all devices, and its output may have parameter's @@ -398,8 +405,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::ApplyImpl( case BuildStrategy::ReduceStrategy::kReduce: cur_device_id = GetAppropriateDeviceID({g_name}); CreateReduceOp(&result, g_name, cur_device_id); - graph->Get(kShardedVarDevice) - .emplace(g_name, cur_device_id); + sharded_var_device.emplace(g_name, cur_device_id); if (!is_dist_train) { bcast_var_name_set[cur_device_id].emplace(p_name); } @@ -458,7 +464,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::ApplyImpl( * Only variables should be the leaves of graph. 
*/ AddOutputToLeafOps(&result); - PADDLE_ENFORCE(!ir::HasCircle(result)); + result.Erase(kGraphOps); return graph; } @@ -498,7 +504,7 @@ void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result, result->Get(kGraphOps).emplace_back(op_handle); auto *in = - result->Get(kGraphVars).at(src_dev_id).at(p_name).back().get(); + result->Get(kGraphVars).at(src_dev_id).at(p_name).back(); op_handle->AddInput(in); for (size_t i = 0; i < places_.size(); ++i) { @@ -535,7 +541,7 @@ void MultiDevSSAGraphBuilder::CreateFusedBroadcastOp( for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) { for (auto &p_name : bcast_varnames[dev_id]) { auto *in = - result->Get(kGraphVars).at(dev_id).at(p_name).back().get(); + result->Get(kGraphVars).at(dev_id).at(p_name).back(); op_handle->AddInput(in); for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) { auto &p = places_[out_dev_id]; @@ -571,7 +577,7 @@ void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result, result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), local_scopes_, places_)); #endif - auto *op_handle = result->Get(kGraphOps).back().get(); + auto *op_handle = result->Get(kGraphOps).back(); for (size_t i = 0; i < places_.size(); ++i) { auto &p = places_[i]; @@ -579,7 +585,7 @@ void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result, auto &vars = result->Get(kGraphVars)[i][og]; PADDLE_ENFORCE(!vars.empty()); auto &prev_grad = vars.back(); - op_handle->AddInput(prev_grad.get()); + op_handle->AddInput(prev_grad); auto var = new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable), @@ -600,14 +606,14 @@ void MultiDevSSAGraphBuilder::InsertDataBalanceOp( result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation), local_scopes_, places_)); #endif - auto *op_handle = result->Get(kGraphOps).back().get(); + auto *op_handle = result->Get(kGraphOps).back(); for (size_t i = 0; i < places_.size(); ++i) { auto &p = places_[i]; SetCommunicationContext(op_handle, p); for (const std::string &d_name : datas) { auto &vars = result->Get(kGraphVars)[i][d_name]; PADDLE_ENFORCE(!vars.empty()); - op_handle->AddInput(vars.back().get()); + op_handle->AddInput(vars.back()); auto var = new VarHandle( result->CreateEmptyNode(d_name, ir::Node::Type::kVariable), vars.size(), i, d_name, p); @@ -617,8 +623,9 @@ void MultiDevSSAGraphBuilder::InsertDataBalanceOp( } } -int MultiDevSSAGraphBuilder::GetOpDeviceID(const ir::Graph &graph, - ir::Node *node) const { +int MultiDevSSAGraphBuilder::GetOpDeviceID( + const ir::Graph &graph, ir::Node *node, + const std::unordered_map &sharded_var_device) const { if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) { return -1; } @@ -631,16 +638,22 @@ int MultiDevSSAGraphBuilder::GetOpDeviceID(const ir::Graph &graph, node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); PADDLE_ENFORCE_EQ(param_grad.size(), 2U); - int dev_id = GetVarDeviceID(graph, param_grad[1]); + int dev_id = GetVarDeviceID(graph, param_grad[1], sharded_var_device); PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]", node->Op()->Type(), param_grad[0], param_grad[1]); return dev_id; } -int MultiDevSSAGraphBuilder::GetVarDeviceID(const ir::Graph &graph, - const std::string &varname) const { - auto &sharded_var_device = graph.Get(kShardedVarDevice); +int MultiDevSSAGraphBuilder::GetVarDeviceID( + const ir::Graph &graph, const std::string &varname, + const std::unordered_map &sharded_var_device) const { auto got = sharded_var_device.find(varname); + if (got == 
sharded_var_device.end()) { + auto pos = varname.find(framework::kNewGradSuffix); + if (pos != std::string::npos) { + got = sharded_var_device.find(varname.substr(0, pos)); + } + } return got == sharded_var_device.end() ? -1 : got->second; } @@ -690,7 +703,7 @@ VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result, result->CreateEmptyNode("reduce", ir::Node::Type::kOperation), local_scopes_, places_)); #endif - auto *op_handle = result->Get(kGraphOps).back().get(); + auto *op_handle = result->Get(kGraphOps).back(); for (size_t i = 0; i < places_.size(); ++i) { auto &p = places_[i]; @@ -698,7 +711,7 @@ VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result, auto &vars = result->Get(kGraphVars)[i][og]; PADDLE_ENFORCE(!vars.empty()); auto &prev_grad = vars.back(); - op_handle->AddInput(prev_grad.get()); + op_handle->AddInput(prev_grad); } auto &vars = result->Get(kGraphVars)[dst_dev_id][og]; auto var = @@ -709,8 +722,9 @@ VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result, return var; } -int MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result, - ir::Node *node) const { +int MultiDevSSAGraphBuilder::CreateDistTrainOp( + ir::Graph *result, ir::Node *node, + std::unordered_map *sharded_var_device) const { int op_dev_id = -1; std::vector input_var_names; std::vector output_var_names; @@ -725,23 +739,22 @@ int MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result, node->Op()->Type() == "split_selected_rows" || node->Op()->Type() == "split_ids") { // TODO(paddle-dev): getting the first var is not safe. - op_dev_id = GetVarDeviceID(*result, input_var_names[0]); + op_dev_id = + GetVarDeviceID(*result, input_var_names[0], *sharded_var_device); if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) { op_dev_id = GetAppropriateDeviceID(input_var_names); for (auto &varname : input_var_names) { - result->Get(kShardedVarDevice) - .emplace(varname, op_dev_id); + sharded_var_device->emplace(varname, op_dev_id); } } for (auto &varname : output_var_names) { - result->Get(kShardedVarDevice) - .emplace(varname, op_dev_id); + sharded_var_device->emplace(varname, op_dev_id); } } else if (node->Op()->Type() == "concat") { - op_dev_id = GetVarDeviceID(*result, input_var_names[0]); + op_dev_id = + GetVarDeviceID(*result, input_var_names[0], *sharded_var_device); for (auto &varname : output_var_names) { - result->Get(kShardedVarDevice) - .emplace(varname, op_dev_id); + sharded_var_device->emplace(varname, op_dev_id); } } else { LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type(); @@ -759,14 +772,14 @@ int MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result, } void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) { - auto *op_handle = result->Get(kGraphOps).back().get(); + auto *op_handle = result->Get(kGraphOps).back(); for (ir::Node *input : node->inputs) { VarHandle *var = nullptr; for (int place_offset = 0; place_offset < num_places; ++place_offset) { auto &var_holders = result->Get(kGraphVars)[place_offset]; auto &var_holder = var_holders[input->Name()]; if (!var_holder.empty()) { - var = var_holder.rbegin()->get(); + var = *var_holder.rbegin(); op_handle->AddInput(var); } } @@ -774,12 +787,14 @@ void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) { } // Create RPC related op handles that connects its in ops and out ops. 
-int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result, - ir::Node *node) const { +int MultiDevSSAGraphBuilder::CreateRPCOp( + ir::Graph *result, ir::Node *node, + std::unordered_map *sharded_var_device) const { int op_dev_id = -1; if (node->Op()->Type() == "send") { // TODO(paddle-dev): getting the first var is not safe. - op_dev_id = GetVarDeviceID(*result, node->inputs[0]->Name()); + op_dev_id = + GetVarDeviceID(*result, node->inputs[0]->Name(), *sharded_var_device); PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]), "This hack no longer holds, please fix."); // the variable name which contains .block means it was splited by @@ -797,11 +812,9 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result, VLOG(10) << "send grad " << input_var_names[0] << " origin " << send_param_grad[1] << " place: " << op_dev_id; for (auto &varname : input_var_names) { - result->Get(kShardedVarDevice) - .emplace(varname, op_dev_id); + sharded_var_device->emplace(varname, op_dev_id); } - result->Get(kShardedVarDevice) - .emplace(send_param_grad[1], op_dev_id); + sharded_var_device->emplace(send_param_grad[1], op_dev_id); } } else if (node->Op()->Type() == "recv") { std::vector output_var_names; @@ -811,7 +824,8 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result, auto recv_param_grad = boost::get>( node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); if (recv_param_grad.size() == 2U) { - op_dev_id = GetVarDeviceID(*result, recv_param_grad[1]); + op_dev_id = + GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device); VLOG(10) << "recv param " << recv_param_grad[0] << " get grad place: " << recv_param_grad[1] << " place: " << op_dev_id; @@ -819,8 +833,7 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result, op_dev_id = GetAppropriateDeviceID(output_var_names); } for (auto &varname : output_var_names) { - result->Get(kShardedVarDevice) - .emplace(varname, op_dev_id); + sharded_var_device->emplace(varname, op_dev_id); } } else { // send_barrier, fetch_barrier will run on place 0; @@ -839,7 +852,7 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result, // send_barrier, recv, fetch_barrier's inputs are deps var, get them from // all places auto p = places_[op_dev_id]; - auto *op_handle = result->Get(kGraphOps).back().get(); + auto *op_handle = result->Get(kGraphOps).back(); op_handle->SetDeviceContext(p, platform::DeviceContextPool::Instance().Get(p)); @@ -847,7 +860,8 @@ int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result, for (ir::Node *output : node->outputs) { int outvar_dev_id = op_dev_id; if (node->Op()->Type() == "fetch_barrier") { - outvar_dev_id = GetVarDeviceID(*result, output->Name()); + outvar_dev_id = + GetVarDeviceID(*result, output->Name(), *sharded_var_device); PADDLE_ENFORCE_NE(outvar_dev_id, -1); } p = places_[outvar_dev_id]; diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.h b/paddle/fluid/framework/details/multi_devices_graph_pass.h index 03b2de2f04da4bac8d342a76c80fd12beaeba4b7..f3ec2d29415240b7012f458070223469d0947166 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.h +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.h @@ -44,12 +44,18 @@ class MultiDevSSAGraphBuilder : public ir::Pass { mutable platform::NCCLContextMap *nccl_ctxs_; #endif - int GetVarDeviceID(const ir::Graph &graph, const std::string &varname) const; + int GetVarDeviceID( + const ir::Graph &graph, const std::string &varname, + const std::unordered_map &sharded_var_device) const; bool IsScaleLossOp(ir::Node 
*node) const; - int CreateRPCOp(ir::Graph *result, ir::Node *node) const; - int CreateDistTrainOp(ir::Graph *result, ir::Node *node) const; + int CreateRPCOp( + ir::Graph *result, ir::Node *node, + std::unordered_map *sharded_var_device) const; + int CreateDistTrainOp( + ir::Graph *result, ir::Node *node, + std::unordered_map *sharded_var_device) const; std::vector FindDistTrainSendVars( const std::vector &nodes) const; @@ -69,7 +75,9 @@ class MultiDevSSAGraphBuilder : public ir::Pass { void CreateComputationalOp(ir::Graph *result, ir::Node *node, int dev_id) const; - int GetOpDeviceID(const ir::Graph &graph, ir::Node *node) const; + int GetOpDeviceID( + const ir::Graph &graph, ir::Node *node, + const std::unordered_map &sharded_var_device) const; void InsertAllReduceOp(ir::Graph *result, const std::string &og) const; diff --git a/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc index 361c91dc78c08a2cbf84ee88211d389c1e2312e5..8f92f0948d7d397ab0f20c01eae9e313f739adec 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h" #include #include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/graph_helper.h" namespace paddle { namespace framework { @@ -62,7 +63,7 @@ void GraphvizSSAGraphPrinter::Print(const ir::Graph &graph, }); size_t op_id = 0; - for (auto &op : graph.Get(kGraphOps)) { + for (auto &op : ir::FilterByNodeWrapper(graph)) { std::string op_name = "op_" + std::to_string(op_id++); sout << op_name << " [label=\"" << op->Name() << "\", shape=rect]" << std::endl; diff --git a/paddle/fluid/framework/details/multi_devices_helper.h b/paddle/fluid/framework/details/multi_devices_helper.h index 175c5a9950be69d7bf6ae9e386af762007a18a51..1a2b75fbc0c28984ce5cf00e0a2ce0f804349bb1 100644 --- a/paddle/fluid/framework/details/multi_devices_helper.h +++ b/paddle/fluid/framework/details/multi_devices_helper.h @@ -35,23 +35,14 @@ namespace details { // The outside vector is the device vector. Each element of this vector is a // map from variable name to variables. The variables, who have the same name, // will have a differsent version. The offset in the -// `std::vector>` is the version of varaibles. -typedef std::vector< - std::unordered_map>>> +// `std::vector` is the version of varaibles. +typedef std::vector>> GraphVars; const char kGraphVars[] = "vars"; // aux variables to represent dependency. Useful to resolve data hazard. -typedef std::unordered_set> GraphDepVars; +typedef std::unordered_set GraphDepVars; const char kGraphDepVars[] = "dep_vars"; - -// all operators. NOTE that even we use a vector here, the operators is -// unordered. 
-typedef std::vector> GraphOps; -const char kGraphOps[] = "ops"; - -typedef std::unordered_map ShardedVarDevice; -const char kShardedVarDevice[] = "sharded_var_device"; } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/op_graph_view.cc b/paddle/fluid/framework/details/op_graph_view.cc index 65dafd376f7c687410270e35f105ff595fe78f59..4838c4198ff35ba3fb562f3a7c0563ee60179e3b 100644 --- a/paddle/fluid/framework/details/op_graph_view.cc +++ b/paddle/fluid/framework/details/op_graph_view.cc @@ -20,19 +20,16 @@ namespace paddle { namespace framework { namespace details { -OpGraphView::OpGraphView( - const std::vector> &ops) { - Build(ops); -} +OpGraphView::OpGraphView(const std::vector &ops) { Build(ops); } -void OpGraphView::Build(const std::vector> &ops) { +void OpGraphView::Build(const std::vector &ops) { for (auto &op : ops) { - preceding_ops_[op.get()]; - pending_ops_[op.get()]; + preceding_ops_[op]; + pending_ops_[op]; for (auto &var : op->Outputs()) { for (auto &pending_op : var->PendingOps()) { - preceding_ops_[pending_op].insert(op.get()); - pending_ops_[op.get()].insert(pending_op); + preceding_ops_[pending_op].insert(op); + pending_ops_[op].insert(pending_op); } } } @@ -41,8 +38,6 @@ void OpGraphView::Build(const std::vector> &ops) { "There are duplicate ops in graph."); } -size_t OpGraphView::OpNumber() const { return preceding_ops_.size(); } - std::unordered_set OpGraphView::AllOps() const { std::unordered_set ret; for (auto &pair : preceding_ops_) { @@ -60,12 +55,6 @@ void OpGraphView::EnforceHasOp(OpHandleBase *op) const { op == nullptr ? "nullptr" : op->DebugString()); } -const std::unordered_set &OpGraphView::PrecedingOps( - OpHandleBase *op) const { - EnforceHasOp(op); - return preceding_ops_.at(op); -} - const std::unordered_set &OpGraphView::PendingOps( OpHandleBase *op) const { EnforceHasOp(op); diff --git a/paddle/fluid/framework/details/op_graph_view.h b/paddle/fluid/framework/details/op_graph_view.h index 398c019be00a6ff5f5b39fdcbe97339341b1685b..afb3e8e59461eeba10d7027fc70b89cc170c1805 100644 --- a/paddle/fluid/framework/details/op_graph_view.h +++ b/paddle/fluid/framework/details/op_graph_view.h @@ -26,21 +26,16 @@ namespace details { class OpGraphView { public: - explicit OpGraphView(const std::vector> &ops); - - size_t OpNumber() const; + explicit OpGraphView(const std::vector &ops); std::unordered_set AllOps() const; - const std::unordered_set &PrecedingOps( - OpHandleBase *op) const; - const std::unordered_set &PendingOps(OpHandleBase *op) const; bool HasOp(OpHandleBase *op) const; private: - void Build(const std::vector> &ops); + void Build(const std::vector &ops); void EnforceHasOp(OpHandleBase *op) const; std::unordered_map> diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h index d09b94a3fd32952985a37cf4246c7640d2db4f56..ba12ca3c61c05b3e856fffa8353d4ec5bf79bc39 100644 --- a/paddle/fluid/framework/details/op_handle_base.h +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -31,7 +31,10 @@ constexpr char kLocalExecScopeName[] = "@LCOAL_SCOPE@"; // It's responsible for populating necessary fields of ir::Node. class OpHandleBase { public: - explicit OpHandleBase(ir::Node *node) : node_(node) {} + // Owned by `node`. No need to be deleted explicitly. 
+ explicit OpHandleBase(ir::Node *node) : node_(node) { + node_->WrappedBy(this); + } virtual ~OpHandleBase(); diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc index 3a9a58412391b188c5e804b41fa47b3607a36bd1..72299c0bfa916d3b92e1c5020ddd69dadad3701d 100644 --- a/paddle/fluid/framework/details/reduce_op_handle_test.cc +++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc @@ -30,8 +30,8 @@ struct TestReduceOpHandle { Scope g_scope_; std::vector local_scopes_; std::vector param_scopes_; - std::unique_ptr op_handle_; - std::vector> vars_; + OpHandleBase *op_handle_; + std::vector vars_; std::vector gpu_list_; std::vector> ctxs_; diff --git a/paddle/fluid/framework/details/reference_count_pass.cc b/paddle/fluid/framework/details/reference_count_pass.cc index 0b994ced7f751f056fec076e3dea8d14d0bed991..08783fb5f8b18329c9167edb0dac39b7dd42a746 100644 --- a/paddle/fluid/framework/details/reference_count_pass.cc +++ b/paddle/fluid/framework/details/reference_count_pass.cc @@ -19,6 +19,7 @@ #include "paddle/fluid/framework/details/computation_op_handle.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" #include "paddle/fluid/framework/details/reference_count_pass.h" +#include "paddle/fluid/framework/ir/graph_helper.h" namespace paddle { namespace framework { @@ -71,14 +72,13 @@ std::unique_ptr ReferenceCountPass::ApplyImpl( // Step 2: Find all variables in non-computation ops which refers to variables // in computation ops std::unordered_set names; - std::unordered_map> + std::unordered_map compute_ref_cnt_map; auto get_ref_cnts_from_compute_op = [&]( - const std::unique_ptr &op, - const std::vector &vars) { + OpHandleBase *op, const std::vector &vars) { std::vector var_names_in_op; - auto *compute_op = dynamic_cast(op.get()); + auto *compute_op = dynamic_cast(op); if (compute_op == nullptr || !platform::is_gpu_place(compute_op->GetPlace())) return var_names_in_op; @@ -121,9 +121,8 @@ std::unique_ptr ReferenceCountPass::ApplyImpl( }; auto update_ref_cnts_from_non_compute_op = [&]( - const std::unique_ptr &op, - const std::vector &vars) { - if (dynamic_cast(op.get()) != nullptr) return; + OpHandleBase *op, const std::vector &vars) { + if (dynamic_cast(op) != nullptr) return; for (VarHandleBase *var_handle_base : vars) { auto *var_handle = dynamic_cast(var_handle_base); if (var_handle == nullptr || !var_handle->Node()->IsVar()) continue; @@ -151,21 +150,21 @@ std::unique_ptr ReferenceCountPass::ApplyImpl( ref_cnt_node, next_compute_op->GetScope(), place, {var_name}, gcs[place.device].get(), cur_ref_cnts[place.device].get()); AddDependencyBetween(next_compute_op, ref_cnt_handle, graph.get()); - compute_ref_cnt_map[next_compute_op].reset(ref_cnt_handle); + compute_ref_cnt_map[next_compute_op] = ref_cnt_handle; } } } } }; - auto &all_ops = graph->Get(kGraphOps); + auto all_ops = ir::FilterByNodeWrapper(*graph); for (auto &op : all_ops) { auto in_var_names = get_ref_cnts_from_compute_op(op, op->Inputs()); auto out_var_names = get_ref_cnts_from_compute_op(op, op->Outputs()); if (in_var_names.empty() && out_var_names.empty()) continue; in_var_names.insert(in_var_names.end(), out_var_names.begin(), out_var_names.end()); - auto *compute_op = dynamic_cast(op.get()); + auto *compute_op = dynamic_cast(op); auto place = boost::get(compute_op->GetPlace()); ir::Node *ref_cnt_node = graph->CreateEmptyNode("reference_count", ir::Node::Type::kOperation); @@ -173,7 +172,7 @@ std::unique_ptr ReferenceCountPass::ApplyImpl( 
ref_cnt_node, compute_op->GetScope(), place, in_var_names, gcs[place.device].get(), cur_ref_cnts[place.device].get()); AddDependencyBetween(compute_op, ref_cnt_handle, graph.get()); - compute_ref_cnt_map[compute_op].reset(ref_cnt_handle); + compute_ref_cnt_map[compute_op] = ref_cnt_handle; } for (auto &op : all_ops) { @@ -181,11 +180,11 @@ std::unique_ptr ReferenceCountPass::ApplyImpl( update_ref_cnts_from_non_compute_op(op, op->Outputs()); } - std::vector> new_all_ops; + std::vector new_all_ops; new_all_ops.reserve(compute_ref_cnt_map.size() + all_ops.size()); for (auto &op : all_ops) { new_all_ops.emplace_back(std::move(op)); - auto it = compute_ref_cnt_map.find(new_all_ops.back().get()); + auto it = compute_ref_cnt_map.find(new_all_ops.back()); if (it != compute_ref_cnt_map.end()) { // Add LeafNode to ReferenceCountOpHandle auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar()); diff --git a/paddle/fluid/framework/details/ssa_graph_executor.cc b/paddle/fluid/framework/details/ssa_graph_executor.cc index 780da5478ff34ecd7096d0ef62b72bf1088dd221..af2cbd5c876fdd7c27cd679f7e9412d1b0604ecc 100644 --- a/paddle/fluid/framework/details/ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/ssa_graph_executor.cc @@ -19,14 +19,16 @@ namespace framework { namespace details { SSAGraphExecutor::~SSAGraphExecutor() {} -void ClearFetchOp(ir::Graph* graph, - std::vector>* fetch_ops) { +void ClearFetchOp(ir::Graph* graph, std::vector* fetch_ops) { if (fetch_ops->empty()) return; for (auto& op : *fetch_ops) { for (auto& out_var : op->Node()->outputs) { graph->RemoveNode(out_var); } + for (auto& in_var : op->Inputs()) { + in_var->RemoveOutput(op, op->Node()); + } graph->RemoveNode(op->Node()); } fetch_ops->clear(); diff --git a/paddle/fluid/framework/details/ssa_graph_executor.h b/paddle/fluid/framework/details/ssa_graph_executor.h index d5cf7737d565c523995e6685b73c57e5a6f0197b..860eaa25b58e4579ad792ff18618de3b90707e8d 100644 --- a/paddle/fluid/framework/details/ssa_graph_executor.h +++ b/paddle/fluid/framework/details/ssa_graph_executor.h @@ -38,8 +38,7 @@ class SSAGraphExecutor { virtual FeedFetchList Run(const std::vector& fetch_tensors) = 0; }; -void ClearFetchOp(ir::Graph* graph, - std::vector>* fetch_ops); +void ClearFetchOp(ir::Graph* graph, std::vector* fetch_ops); } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index dc63effd1b7c8fe5bb3fc91058eb855e552d3926..677a2937945b03fa577317cb4f26e09354d06957 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" +#include "paddle/fluid/framework/ir/graph_helper.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { @@ -51,25 +52,25 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( for (auto &var_map : graph_->Get(details::kGraphVars)) { for (auto &name_pair : var_map) { for (auto &version_pair : name_pair.second) { - InsertPendingVar(&pending_vars, ready_vars.get(), version_pair.get()); + InsertPendingVar(&pending_vars, ready_vars.get(), version_pair); } } } for (auto &var : graph_->Get(details::kGraphDepVars)) { - InsertPendingVar(&pending_vars, ready_vars.get(), var.get()); + InsertPendingVar(&pending_vars, ready_vars.get(), var); } 
- for (auto &op : graph_->Get(details::kGraphOps)) { + for (auto &op : ir::FilterByNodeWrapper(*graph_)) { if (op->Inputs().empty()) { // Special case, Op has no input. - ready_ops.insert(op.get()); + ready_ops.insert(op); } else { - InsertPendingOp(&pending_ops, op.get()); + InsertPendingOp(&pending_ops, op); } } // Step 2. Insert FetchOps - std::vector> fetch_ops; - std::unordered_set> fetch_dependencies; + std::vector fetch_ops; + std::unordered_set fetch_dependencies; FeedFetchList fetch_data(fetch_tensors.size()); InsertFetchOps(fetch_tensors, &fetch_ops, &fetch_dependencies, &pending_ops, @@ -109,6 +110,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( for (auto &run_op_future : run_op_futures_) { run_op_future.wait(); } + ClearFetchOp(graph_.get(), &fetch_ops); exception_holder_.ReThrow(); } else { continue; @@ -140,8 +142,8 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( void ThreadedSSAGraphExecutor::InsertFetchOps( const std::vector &fetch_tensors, - std::vector> *fetch_ops, - std::unordered_set> *fetch_dependencies, + std::vector *fetch_ops, + std::unordered_set *fetch_dependencies, std::unordered_map *pending_ops, std::unordered_set *pending_vars, BlockingQueue *ready_vars, FeedFetchList *fetch_data) { @@ -151,7 +153,7 @@ void ThreadedSSAGraphExecutor::InsertFetchOps( for (auto &var_map : graph_->Get(details::kGraphVars)) { auto it = var_map.find(fetch_var_name); if (it != var_map.end()) { - fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); + fetched_vars[fetch_var_name].push_back(*it->second.rbegin()); } } } @@ -211,7 +213,9 @@ void ThreadedSSAGraphExecutor::RunOp( if (VLOG_IS_ON(10)) { VLOG(10) << op << " " << op->Name() << " : " << op->DebugString(); } - op->Run(strategy_.use_cuda_); + if (LIKELY(!strategy_.dry_run_)) { + op->Run(strategy_.use_cuda_); + } VLOG(10) << op << " " << op->Name() << " Done "; running_ops_--; ready_var_q->Extend(op->Outputs()); diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index dbb0b498d995a897b109bd4ef98521b2193276ed..24da56c09e3e0f3894d58e5af8838c98e3e1e67c 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -48,7 +48,7 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { // Use topological sort algorithm FeedFetchList Run(const std::vector &fetch_tensors) override; - ~ThreadedSSAGraphExecutor() {} + ~ThreadedSSAGraphExecutor() final = default; private: void RunOp(const std::shared_ptr> &ready_var_q, @@ -70,13 +70,13 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { BlockingQueue *ready_vars, VarHandleBase *var) const; - void InsertFetchOps( - const std::vector &fetch_tensors, - std::vector> *fetch_ops, - std::unordered_set> *fetch_dependencies, - std::unordered_map *pending_ops, - std::unordered_set *pending_vars, - BlockingQueue *ready_vars, FeedFetchList *fetch_data); + void InsertFetchOps(const std::vector &fetch_tensors, + std::vector *fetch_ops, + std::unordered_set *fetch_dependencies, + std::unordered_map *pending_ops, + std::unordered_set *pending_vars, + BlockingQueue *ready_vars, + FeedFetchList *fetch_data); private: ExecutionStrategy strategy_; diff --git a/paddle/fluid/framework/details/var_handle.cc b/paddle/fluid/framework/details/var_handle.cc index 5457870e9ff5d7cf67c9c7076b9aae94eeada779..30da029ca2a90e7faa6288557ff2f1aeb21cc1c6 100644 --- a/paddle/fluid/framework/details/var_handle.cc +++ 
b/paddle/fluid/framework/details/var_handle.cc @@ -20,6 +20,8 @@ namespace details { VarHandleBase::~VarHandleBase() {} +VarHandle::~VarHandle() { VLOG(4) << "deleting var handle " << DebugString(); } + std::string VarHandle::DebugString() const { std::stringstream ss; ss << name_ << ":" << place_; @@ -27,6 +29,10 @@ std::string VarHandle::DebugString() const { } std::string DummyVarHandle::DebugString() const { return node_->Name(); } + +DummyVarHandle::~DummyVarHandle() { + VLOG(4) << "deleting dummy var handle " << DebugString(); +} } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/var_handle.h b/paddle/fluid/framework/details/var_handle.h index a1f458c660ce9f73bc9ac2ed194091ad0b8f8400..3b007d7b1a52df765a2dbd41939f8f865123cb43 100644 --- a/paddle/fluid/framework/details/var_handle.h +++ b/paddle/fluid/framework/details/var_handle.h @@ -35,7 +35,10 @@ class OpHandleBase; // A variable can only be generated by a single operator. i.e. // This is a single assignment graph. struct VarHandleBase { - explicit VarHandleBase(ir::Node* node) : node_(node) {} + // Owned by `node`. No need to be deleted explicitly. + explicit VarHandleBase(ir::Node* node) : node_(node) { + node_->WrappedBy(this); + } virtual ~VarHandleBase(); @@ -94,6 +97,8 @@ struct VarHandleBase { struct VarHandle : public VarHandleBase { explicit VarHandle(ir::Node* node) : VarHandleBase(node) {} + virtual ~VarHandle(); + std::string DebugString() const override; VarHandle(ir::Node* node, size_t version, size_t scope_index, @@ -121,6 +126,8 @@ struct VarHandle : public VarHandleBase { struct DummyVarHandle : public VarHandleBase { explicit DummyVarHandle(ir::Node* node) : VarHandleBase(node) {} + virtual ~DummyVarHandle(); + std::string DebugString() const override; }; diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index 28231a53bad50fe9f19cfe3e73c3dc09aa3762cf..4cf973253cc4f1f22d2fc578a1ac3a8c95e479c9 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -53,6 +53,7 @@ set(GLOB_PASS_LIB ${PASS_LIBRARY} CACHE INTERNAL "Global PASS library") cc_library(pass_builder SRCS pass_builder.cc DEPS pass) +cc_test(node_test SRCS node_test.cc DEPS node) cc_test(pass_test SRCS pass_test.cc DEPS graph pass graph_helper) cc_test(graph_test SRCS graph_test.cc DEPS graph graph_helper op_registry) cc_test(graph_helper_test SRCS graph_helper_test.cc DEPS graph graph_helper op_registry) diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h index 9d7aa5d32deb274fbf29481b0d4754c05d1e21b5..8830638ec8b70c3fcaaa83c2c3c819e2cc8ab795 100644 --- a/paddle/fluid/framework/ir/graph.h +++ b/paddle/fluid/framework/ir/graph.h @@ -102,6 +102,15 @@ class Graph { attr_dels_[attr_name] = []() {}; } + template + void Erase(const std::string &attr_name) { + PADDLE_ENFORCE(attrs_.count(attr_name) != 0, "%s not set in the graph", + attr_name); + attr_dels_[attr_name](); + attrs_.erase(attr_name); + attr_dels_.erase(attr_name); + } + const std::unordered_set &Nodes() const { return node_set_; } // Create a normal variable with non-null VarDesc. 
diff --git a/paddle/fluid/framework/ir/graph_helper.h b/paddle/fluid/framework/ir/graph_helper.h index ec46b38c01b8c369ab37b4fbd5497ec120d8db91..8d92c406689ab3a97596a8666ceb452aec4be170 100644 --- a/paddle/fluid/framework/ir/graph_helper.h +++ b/paddle/fluid/framework/ir/graph_helper.h @@ -37,6 +37,15 @@ std::vector TopologySortOperations(const Graph &graph); std::map> BuildOperationAdjList( const Graph &graph); +template +std::vector FilterByNodeWrapper(const Graph &graph) { + std::vector ret; + for (ir::Node *n : graph.Nodes()) { + if (n->IsWrappedBy()) ret.push_back(&n->Wrapper()); + } + return ret; +} + } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/node.h b/paddle/fluid/framework/ir/node.h index d6d42f5e92080aa57445e2d6ce59aa3faa89d22d..eedb375cf46165ebb09af56e5ab052a0327f1d0c 100644 --- a/paddle/fluid/framework/ir/node.h +++ b/paddle/fluid/framework/ir/node.h @@ -15,7 +15,10 @@ limitations under the License. */ #pragma once #include +#include +#include #include + #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/var_desc.h" #include "paddle/fluid/platform/macros.h" @@ -24,9 +27,33 @@ namespace paddle { namespace framework { namespace ir { -// Node should normally created by Graph::CreateXXXNode(). +// Node should only created by Graph::CreateXXXNode(). +// 1. Every Node should be part of a graph. No dangling Node exists. +// 2. Node only contains members necessary for building graph structure. +// It doesn't contain other unrelated members, such as device, etc. +// +// Sometimes, for specific usages, Node needs to have additional members, +// such as device_placement, version in order to be executed. It is suggested +// to use composition pattern. +// +// class RunnableOp { +// RunnableOp(ir::Node* n) : n_(n) { n_.WrappedBy(this); } +// +// int any_thing_; +// } +// +// RunnableOp is owned by the ir::Node that composes it. In other words. +// ir::Node will be responsible for deleting RunnableOp, say, when ir::Node +// is deleted from the graph. class Node { public: + virtual ~Node() { + if (!wrapper_.empty()) { + VLOG(4) << "ir::Node deleting a wrapper node " << Name(); + wrapper_deleter_(); + } + } + enum class Type { kOperation, kVariable }; static constexpr char kControlDepVarName[] = "__control_var"; @@ -44,6 +71,29 @@ class Node { return op_desc_.get(); } + // Set the `wrapper` that wraps the Node. `wrapper` is owned by Node. + template + void WrappedBy(T* wrapper) { + if (!wrapper_.empty()) { + wrapper_deleter_(); + } + wrapper_ = wrapper; + wrapper_deleter_ = [wrapper]() { delete wrapper; }; + wrapper_type_ = std::type_index(typeid(T)); + } + + // Return a reference to the `wrapper`. + template + T& Wrapper() { + return *boost::any_cast(wrapper_); + } + + // Test if the Node is wrapped by type T. + template + bool IsWrappedBy() { + return std::type_index(typeid(T)) == wrapper_type_; + } + // Please don't use this API! int id() const { return id_; } @@ -95,6 +145,11 @@ class Node { static int count_; // Please don't use this API or make this public. 
static void ResetId() { count_ = 0; } + + boost::any wrapper_; + std::function wrapper_deleter_; + std::type_index wrapper_type_ = std::type_index(typeid(void)); + DISABLE_COPY_AND_ASSIGN(Node); }; diff --git a/paddle/fluid/framework/ir/node_test.cc b/paddle/fluid/framework/ir/node_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..694efadda078169c993457181c00f7b357a09e87 --- /dev/null +++ b/paddle/fluid/framework/ir/node_test.cc @@ -0,0 +1,80 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "gtest/gtest.h" +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/pass.h" + +namespace paddle { +namespace framework { +namespace ir { + +class RunnableOp { + public: + RunnableOp(Node* node, bool* alive) : node_(node), alive_(alive) { + node_->WrappedBy(this); + } + + virtual ~RunnableOp() { *alive_ = false; } + + private: + Node* node_; + bool* alive_; +}; + +class RunnableOp2 { + public: + RunnableOp2(Node* node, bool* alive) : node_(node), alive_(alive) { + node_->WrappedBy(this); + } + + virtual ~RunnableOp2() { *alive_ = false; } + + private: + Node* node_; + bool* alive_; +}; + +TEST(NodeTest, Basic) { + bool alive1 = true; + bool alive2 = true; + std::unique_ptr n1(CreateNodeForTest("n1", Node::Type::kVariable)); + std::unique_ptr n2(CreateNodeForTest("n2", Node::Type::kVariable)); + + EXPECT_FALSE(n1->IsWrappedBy()); + EXPECT_FALSE(n1->IsWrappedBy()); + EXPECT_FALSE(n2->IsWrappedBy()); + EXPECT_FALSE(n2->IsWrappedBy()); + + new RunnableOp(n1.get(), &alive1); + new RunnableOp2(n2.get(), &alive2); + + EXPECT_TRUE(n1->IsWrappedBy()); + EXPECT_FALSE(n1->IsWrappedBy()); + EXPECT_FALSE(n2->IsWrappedBy()); + EXPECT_TRUE(n2->IsWrappedBy()); + + EXPECT_TRUE(alive1); + EXPECT_TRUE(alive2); + + n1.reset(nullptr); + n2.reset(nullptr); + EXPECT_FALSE(alive1); + EXPECT_FALSE(alive2); +} + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 45fc36c70633204dbfadbd10757c08b009d2cc74..73886ed304188d95e1d6a4a49f8ce54168ae41ce 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -358,7 +358,7 @@ static bool VarIsTensor(const Variable& var) { return var.IsType() || var.IsType(); } -const Tensor* GetTensorFromVar(const Variable& var) { +const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) { if (var.IsType()) { return static_cast(&(var.Get())); } else if (var.IsType()) { @@ -369,7 +369,7 @@ const Tensor* GetTensorFromVar(const Variable& var) { } } -static Tensor* GetMutableTensorFromVar(Variable* var) { +Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) { if (var->IsType()) { return var->GetMutable(); } else if (var->IsType()) { @@ -414,8 +414,7 @@ bool ExecutionContext::HasOutput(const std::string& name) const { template <> const Tensor* ExecutionContext::Input(const std::string& name) const { - auto* var = 
InputVar(name); - return var == nullptr ? nullptr : GetTensorFromVar(*var); + return Input(name); } template <> @@ -425,17 +424,21 @@ const std::vector ExecutionContext::MultiInput( std::vector res; res.reserve(names.size()); std::transform(names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { + [&](const std::string& sub_name) -> const Tensor* { auto var = scope_.FindVar(sub_name); - return var == nullptr ? nullptr : GetTensorFromVar(*var); + if (var == nullptr) return nullptr; + PADDLE_ENFORCE( + var->IsType(), + "%s should be LoDTensor, but the received type is %s", + sub_name, var->Type().name()); + return &(var->Get()); }); return res; } template <> Tensor* ExecutionContext::Output(const std::string& name) const { - auto var = OutputVar(name); - return var == nullptr ? nullptr : GetMutableTensorFromVar(var); + return Output(name); } template <> @@ -445,10 +448,14 @@ std::vector ExecutionContext::MultiOutput( std::vector res; res.reserve(names.size()); std::transform(names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { + [&](const std::string& sub_name) -> Tensor* { auto var = scope_.FindVar(sub_name); - return var == nullptr ? nullptr - : GetMutableTensorFromVar(var); + if (var == nullptr) return nullptr; + PADDLE_ENFORCE( + var->IsType(), + "%s should be LoDTensor, but the received type is %s", + sub_name, var->Type().name()); + return var->GetMutable(); }); return res; } @@ -768,11 +775,12 @@ void OperatorWithKernel::TransferInplaceVarsBack( const Scope& transfer_scope) const { for (auto& var_name : inplace_vars) { VLOG(3) << "share inplace var " + var_name + " back to it's original scope"; - auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name)); + auto* original_tensor = + GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name)); auto* var = transfer_scope.FindVar(var_name); PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr", var_name); - auto* transformed_tensor = GetTensorFromVar(*var); + auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var); original_tensor->ShareDataWith(*transformed_tensor); } } @@ -789,7 +797,7 @@ Scope* OperatorWithKernel::TryTransferData( continue; } - auto* tensor_in = GetTensorFromVar(*var); + auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var); if (!tensor_in->IsInitialized()) { continue; } diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 96ad3205235b921a7cf60ed674a8350f74d18509..40b0130b265471a1288d966c4cbcd4f0e1bdb9f1 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -54,6 +54,9 @@ constexpr char kGradVarSuffix[] = "@GRAD"; /// Variables with this suffix are supposed to be filled up with zeros. constexpr char kZeroVarSuffix[] = "@ZERO"; +/// Variables with this suffix are the new Gradient. 
+constexpr char kNewGradSuffix[] = "@NEWGRAD@"; + // define some kernel priority /* Define multiple kernel type fallback order*/ extern std::vector> kKernelPriority; @@ -63,7 +66,8 @@ inline std::string GradVarName(const std::string& var_name) { } proto::VarType::Type GetDataTypeOfVar(const Variable* var); -const Tensor* GetTensorFromVar(const Variable& var); +const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var); +Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var); class OperatorBase; class ExecutionContext; @@ -224,7 +228,7 @@ class ExecutionContext { std::vector res; res.reserve(names.size()); std::transform(names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { + [&](const std::string& sub_name) -> const T* { auto var = scope_.FindVar(sub_name); return var == nullptr ? nullptr : &var->Get(); }); @@ -237,7 +241,7 @@ class ExecutionContext { std::vector res; res.reserve(names.size()); std::transform(names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { + [&](const std::string& sub_name) -> T* { auto var = scope_.FindVar(sub_name); return var == nullptr ? nullptr : var->GetMutable(); }); diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index a45b9ec7a20ac3629d182f009b735d4d82fb5dc2..dfb107688ad7281765049cd9849d56b8a61bdd37 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -38,9 +38,20 @@ class ParallelExecutorPrivate { explicit ParallelExecutorPrivate(const std::vector &places) : places_(places) {} + ~ParallelExecutorPrivate() { + if (own_local_scope_) { + for (size_t i = 1; i < local_scopes_.size(); ++i) { + // Skip the first scope, since it is the global scope. + Scope *local_scope = local_scopes_[i]; + if (global_scope_->HasKid(local_scope)) { + global_scope_->DeleteScope(local_scope); + } + } + } + } std::vector places_; std::vector local_scopes_; - Scope *global_scope_; + Scope *global_scope_; // not owned std::unique_ptr executor_; #ifdef PADDLE_WITH_CUDA @@ -306,16 +317,6 @@ ParallelExecutor::~ParallelExecutor() { for (auto &p : member_->places_) { platform::DeviceContextPool::Instance().Get(p)->Wait(); } - - if (member_->own_local_scope_) { - for (size_t i = 1; i < member_->local_scopes_.size(); ++i) { - Scope *local_scope = member_->local_scopes_[i]; - if (member_->global_scope_->HasKid(local_scope)) { - member_->global_scope_->DeleteScope(local_scope); - } - } - } - // member_ must be destructed before gcs_ since the destructor of // ReferenceCountOpHandle use raw pointers of gcs_ inside. member_.reset(); diff --git a/paddle/fluid/inference/analysis/analyzer.cc b/paddle/fluid/inference/analysis/analyzer.cc index ef4142f334e503380dc7ccd74c348404ffe52ee6..559b3b6d2140a33ed6846562600a307efbf74161 100644 --- a/paddle/fluid/inference/analysis/analyzer.cc +++ b/paddle/fluid/inference/analysis/analyzer.cc @@ -101,6 +101,7 @@ Analyzer::Analyzer() { Register("manager1", new DfgPassManagerImpl); } void Analyzer::Run(Argument* argument) { std::vector passes; + passes.push_back("graph_viz_pass"); // add graphviz for debug. #ifdef PADDLE_WITH_MKLDNN if (use_mkldnn_) { VLOG(3) << "Adding MKL-DNN placement pass"; @@ -110,13 +111,13 @@ void Analyzer::Run(Argument* argument) { // infer_clean_graph_pass should be the first default pass // after mkldnn_placement_pass. passes.push_back("infer_clean_graph_pass"); + passes.push_back("graph_viz_pass"); // add graphviz for debug. 
for (auto& pass : ir_passes_) { if (!disabled_ir_passes_.count(pass)) { passes.push_back(pass); passes.push_back("graph_viz_pass"); // add graphviz for debug. } } - passes.push_back("graph_viz_pass"); argument->Set(kFluidToIrPassesAttr, new std::vector(passes)); for (auto& x : data_) { diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index 49a9ebe3ddec1e4fd59ae1155a706859e249d25c..fd05c967774b336275f4c7bd98313bc1d750502f 100644 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -37,8 +37,8 @@ if(WITH_TESTING) ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${PYTHON_TESTS_DIR}/book) set_tests_properties(test_api_impl PROPERTIES DEPENDS test_image_classification) endif() -cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor ${inference_deps} paddle_inference_api - ARGS --dirname=${PYTHON_TESTS_DIR}/book) +cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor ${inference_deps} + ARGS --dirname=${WORD2VEC_MODEL_DIR}) if(WITH_GPU AND TENSORRT_FOUND) cc_library(paddle_inference_tensorrt_subgraph_engine diff --git a/paddle/fluid/inference/api/analysis_predictor_tester.cc b/paddle/fluid/inference/api/analysis_predictor_tester.cc index 13c25da1b52742e6114b294847c21ce735b9fc21..f75c45f3a0438bc437e716160af8c5eab5b10fce 100644 --- a/paddle/fluid/inference/api/analysis_predictor_tester.cc +++ b/paddle/fluid/inference/api/analysis_predictor_tester.cc @@ -24,7 +24,7 @@ using contrib::AnalysisConfig; TEST(AnalysisPredictor, ZeroCopy) { AnalysisConfig config; - config.model_dir = FLAGS_dirname + "/word2vec.inference.model"; + config.model_dir = FLAGS_dirname; config.use_feed_fetch_ops = false; auto predictor = CreatePaddlePredictor(config); diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 242f72e2cade63c89ad9643c69043f5804749d46..1869322a3f2e35307a972715fef4ccc22826441e 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -296,7 +296,6 @@ op_library(cos_sim_op DEPS cos_sim_functor) op_library(parallel_do_op DEPS executor) op_library(unsqueeze_op DEPS reshape_op) op_library(squeeze_op DEPS reshape_op) -op_library(extract_rows_op DEPS memory) op_library(flatten_op DEPS reshape_op) op_library(sequence_pad_op DEPS sequence_padding) op_library(unstack_op DEPS stack_op) diff --git a/paddle/fluid/operators/adagrad_op.cc b/paddle/fluid/operators/adagrad_op.cc index a3ef9ad9f91f1f626bd33876693ecc17ad76b96b..c88297ff544ddb0e5a97452a8ad2e8f9f77825ba 100644 --- a/paddle/fluid/operators/adagrad_op.cc +++ b/paddle/fluid/operators/adagrad_op.cc @@ -119,8 +119,8 @@ struct SparseAdagradFunctor { auto* grad_merge_data = grad_merge.mutable_value()->template data(); // 2. 
m += g_m * g_m - math::scatter::Mul sqare_func; - auto grad_square = sqare_func(context, grad_merge, grad_merge); + auto grad_square = + SquareSelectedRows(context, grad_merge); math::SelectedRowsAddToTensor functor; functor(context, grad_square, moment); diff --git a/paddle/fluid/operators/adagrad_op.cu b/paddle/fluid/operators/adagrad_op.cu index b25268786d622bc7a94117849763833e528bef48..b99b33343d36fbb7f6b1a2928e142ca615b238b3 100644 --- a/paddle/fluid/operators/adagrad_op.cu +++ b/paddle/fluid/operators/adagrad_op.cu @@ -84,8 +84,8 @@ struct SparseAdagradFunctor { auto* grad_merge_data = grad_merge.mutable_value()->template data(); framework::Vector merge_rows(grad_merge.rows()); // 2. m += g_m * g_m - math::scatter::Mul sqare_func; - auto grad_square = sqare_func(context, grad_merge, grad_merge); + auto grad_square = + SquareSelectedRows(context, grad_merge); math::SelectedRowsAddToTensor functor; functor(context, grad_square, moment); diff --git a/paddle/fluid/operators/adagrad_op.h b/paddle/fluid/operators/adagrad_op.h index 0a16ce00f71586ef55007c3753e024be29d0ed56..9f6ef391696aa8718be71ae945e746b876813d94 100644 --- a/paddle/fluid/operators/adagrad_op.h +++ b/paddle/fluid/operators/adagrad_op.h @@ -28,6 +28,20 @@ struct SparseAdagradFunctor { framework::Tensor *moment, framework::Tensor *param); }; +template +framework::SelectedRows SquareSelectedRows( + const DeviceContext &context, const framework::SelectedRows &input) { + framework::SelectedRows out; + out.set_rows(input.rows()); + out.set_height(input.height()); + out.mutable_value()->mutable_data(input.value().dims(), + context.GetPlace()); + auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); + auto e_in = framework::EigenVector::Flatten(input.value()); + e_out.device(*context.eigen_device()) = e_in.square(); + return out; +} + template class AdagradOpKernel : public framework::OpKernel { public: diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index 5a2ac66cd242d773d172243e07f213cc39a2841c..3a486efbd3063f1843d1227a6f4c841ee9ccda4b 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -375,8 +375,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { auto src_md = platform::MKLDNNMemDesc( src_tz, platform::MKLDNNGetDataType(), chosen_memory_format); auto weights_md = platform::MKLDNNMemDesc( - weights_tz, platform::MKLDNNGetDataType(), - (g == 1) ? chosen_memory_format : mkldnn::memory::format::goihw); + weights_tz, platform::MKLDNNGetDataType(), chosen_memory_format); std::vector bias_tz; // TODO(mgallus): avoid empty vector creation. // Currently used whenever bias is != nullptr. 
auto dst_md = platform::MKLDNNMemDesc( diff --git a/paddle/fluid/operators/elementwise_add_op.h b/paddle/fluid/operators/elementwise_add_op.h index c60cb1f92e99329d52f6ed39dccde406a5f83563..9edbdbefe76600dc4bf937d95e70d11450206cd4 100644 --- a/paddle/fluid/operators/elementwise_add_op.h +++ b/paddle/fluid/operators/elementwise_add_op.h @@ -28,9 +28,9 @@ struct AddFunctor { }; template -void default_elementwise_add(const framework::ExecutionContext& ctx, - const framework::Tensor* x, - const framework::Tensor* y, framework::Tensor* z) { +void default_elementwise_add(const framework::ExecutionContext &ctx, + const framework::Tensor *x, + const framework::Tensor *y, framework::Tensor *z) { int axis = ctx.Attr("axis"); ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, AddFunctor(), z); @@ -40,9 +40,9 @@ template typename std::enable_if< std::is_floating_point::value && std::is_same::value>::type -elementwise_add(const framework::ExecutionContext& ctx, - const framework::Tensor* x, const framework::Tensor* y, - framework::Tensor* z) { +elementwise_add(const framework::ExecutionContext &ctx, + const framework::Tensor *x, const framework::Tensor *y, + framework::Tensor *z) { auto eigen_x = framework::EigenVector::Flatten(*x); auto eigen_y = framework::EigenVector::Flatten(*y); auto eigen_z = framework::EigenVector::Flatten(*z); @@ -55,21 +55,20 @@ template typename std::enable_if< !std::is_floating_point::value || !std::is_same::value>::type -elementwise_add(const framework::ExecutionContext& ctx, - const framework::Tensor* x, const framework::Tensor* y, - framework::Tensor* z) { +elementwise_add(const framework::ExecutionContext &ctx, + const framework::Tensor *x, const framework::Tensor *y, + framework::Tensor *z) { default_elementwise_add(ctx, x, y, z); } template class ElementwiseAddKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { - using Tensor = framework::Tensor; + void Compute(const framework::ExecutionContext &ctx) const override { + auto *x = ctx.Input("X"); + auto *y = ctx.Input("Y"); + auto *z = ctx.Output("Out"); - const auto x = ctx.Input("X"); - const auto y = ctx.Input("Y"); - auto z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); auto dims_equal = x->dims() == y->dims(); @@ -87,13 +86,13 @@ struct IdentityGrad { }; template -void default_elementwise_add_grad(const framework::ExecutionContext& ctx, - const framework::Tensor* x, - const framework::Tensor* y, - const framework::Tensor* out, - const framework::Tensor* dout, - framework::Tensor* dx, - framework::Tensor* dy) { +void default_elementwise_add_grad(const framework::ExecutionContext &ctx, + const framework::Tensor *x, + const framework::Tensor *y, + const framework::Tensor *out, + const framework::Tensor *dout, + framework::Tensor *dx, + framework::Tensor *dy) { int axis = ctx.Attr("axis"); ElemwiseExplicitGradCompute, @@ -106,11 +105,11 @@ template typename std::enable_if< std::is_floating_point::value && std::is_same::value>::type -elementwise_add_grad(const framework::ExecutionContext& ctx, - const framework::Tensor* x, const framework::Tensor* y, - const framework::Tensor* out, - const framework::Tensor* dout, framework::Tensor* dx, - framework::Tensor* dy) { +elementwise_add_grad(const framework::ExecutionContext &ctx, + const framework::Tensor *x, const framework::Tensor *y, + const framework::Tensor *out, + const framework::Tensor *dout, framework::Tensor *dx, + framework::Tensor *dy) { auto blas = math::GetBlas(ctx); if (dx) { @@ 
-128,27 +127,27 @@ template typename std::enable_if< !std::is_floating_point::value || !std::is_same::value>::type -elementwise_add_grad(const framework::ExecutionContext& ctx, - const framework::Tensor* x, const framework::Tensor* y, - const framework::Tensor* out, - const framework::Tensor* dout, framework::Tensor* dx, - framework::Tensor* dy) { +elementwise_add_grad(const framework::ExecutionContext &ctx, + const framework::Tensor *x, const framework::Tensor *y, + const framework::Tensor *out, + const framework::Tensor *dout, framework::Tensor *dx, + framework::Tensor *dy) { default_elementwise_add_grad(ctx, x, y, out, dout, dx, dy); } template class ElementwiseAddGradKernel : public ElemwiseGradKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { + void Compute(const framework::ExecutionContext &ctx) const override { ElemwiseGradKernel::Compute(ctx); using Tensor = framework::Tensor; - auto* dout = ctx.Input(framework::GradVarName("Out")); - auto* dx = ctx.Output(framework::GradVarName("X")); - auto* dy = ctx.Output(framework::GradVarName("Y")); + auto *dout = ctx.Input(framework::GradVarName("Out")); + auto *dx = ctx.Output(framework::GradVarName("X")); + auto *dy = ctx.Output(framework::GradVarName("Y")); // skip out, x, y - auto* out = dout; + auto *out = dout; auto *x = dout, *y = dout; if (platform::is_cpu_place(ctx.GetPlace()) && dx != nullptr && diff --git a/paddle/fluid/operators/elementwise_div_op.h b/paddle/fluid/operators/elementwise_div_op.h index 41a7950bf0c598507c0fda48c6a43f2fd38c41d2..cdb1264d298ef48d6b3da39d63ff1d09e1561aa4 100644 --- a/paddle/fluid/operators/elementwise_div_op.h +++ b/paddle/fluid/operators/elementwise_div_op.h @@ -28,11 +28,10 @@ template class ElementwiseDivKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - using Tensor = framework::Tensor; + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* z = ctx.Output("Out"); - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); - auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, diff --git a/paddle/fluid/operators/elementwise_max_op.h b/paddle/fluid/operators/elementwise_max_op.h index bfb5c931958b4ca890ea720af42dad91d5625abb..367489dd563f7d8bdf430517cadf49d4ef2a0105 100644 --- a/paddle/fluid/operators/elementwise_max_op.h +++ b/paddle/fluid/operators/elementwise_max_op.h @@ -29,11 +29,10 @@ template class ElementwiseMaxKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - using Tensor = framework::Tensor; + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* z = ctx.Output("Out"); - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); - auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, diff --git a/paddle/fluid/operators/elementwise_min_op.h b/paddle/fluid/operators/elementwise_min_op.h index db035ffb52e619b337c8190af4ed0e155aaac48d..1bd0a6279766c8eba92d1e3a76191c59410286b2 100644 --- a/paddle/fluid/operators/elementwise_min_op.h +++ b/paddle/fluid/operators/elementwise_min_op.h @@ -28,11 +28,10 @@ template class ElementwiseMinKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - using Tensor = framework::Tensor; + auto* x = ctx.Input("X"); + auto* y = 
ctx.Input("Y"); + auto* z = ctx.Output("Out"); - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); - auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, diff --git a/paddle/fluid/operators/elementwise_mul_op.h b/paddle/fluid/operators/elementwise_mul_op.h index b870d08a1a28fd3e678aeb7211f7e3ec8b2c4c65..29e4ab7db1377b6aa80e94a26ab3cb8669f9154a 100644 --- a/paddle/fluid/operators/elementwise_mul_op.h +++ b/paddle/fluid/operators/elementwise_mul_op.h @@ -60,11 +60,10 @@ template class ElementwiseMulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - using Tensor = framework::Tensor; + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* z = ctx.Output("Out"); - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); - auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); if (x->numel() == y->numel()) { elementwise_mul(ctx, x, y, z); diff --git a/paddle/fluid/operators/elementwise_op.h b/paddle/fluid/operators/elementwise_op.h index 68c6e315cc3b5fa932f8946f6d4f838f4d3fc5a5..5eb4233344e1c49e69dd9830178fd6fb2ae7e51c 100644 --- a/paddle/fluid/operators/elementwise_op.h +++ b/paddle/fluid/operators/elementwise_op.h @@ -13,10 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once + #include #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" + #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" #endif @@ -29,7 +31,8 @@ class ElementwiseOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; using Tensor = framework::Tensor; - void InferShape(framework::InferShapeContext* ctx) const override { + + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of elementwise op should not be null."); PADDLE_ENFORCE(ctx->HasInput("Y"), @@ -37,6 +40,17 @@ class ElementwiseOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of elementwise op should not be null."); + PADDLE_ENFORCE( + ctx->GetInputsVarType("X").front() == + framework::proto::VarType::LOD_TENSOR, + "The input var's type should be LoDTensor, but the received is %s", + ctx->Inputs("X").front(), ctx->GetInputsVarType("X").front()); + PADDLE_ENFORCE( + ctx->GetInputsVarType("Y").front() == + framework::proto::VarType::LOD_TENSOR, + "The input var's type should be LoDTensor, but the received is %s", + ctx->Inputs("Y").front(), ctx->GetInputsVarType("Y").front()); + auto x_dim = ctx->GetInputDim("X"); auto y_dim = ctx->GetInputDim("Y"); PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), @@ -47,9 +61,8 @@ class ElementwiseOp : public framework::OperatorWithKernel { } framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - auto input_data_type = - framework::ToDataType(ctx.Input("X")->type()); + const framework::ExecutionContext &ctx) const override { + auto input_data_type = framework::GetDataTypeOfVar(ctx.InputVar("X")); #ifdef PADDLE_WITH_MKLDNN if (platform::CanMKLDNNBeUsed(ctx)) { @@ -64,12 +77,12 @@ class ElementwiseOp : public framework::OperatorWithKernel { class ElementwiseOpInferVarType : public framework::VarTypeInference { public: - void operator()(const framework::OpDesc& op_desc, - 
framework::BlockDesc* block) const override { + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { auto x_name = op_desc.Input("X")[0]; auto out_name = op_desc.Output("Out")[0]; - auto& x = block->FindRecursiveOrCreateVar(x_name); - auto& out = block->FindRecursiveOrCreateVar(out_name); + auto &x = block->FindRecursiveOrCreateVar(x_name); + auto &out = block->FindRecursiveOrCreateVar(out_name); out.SetType(x.GetType()); out.SetDataType(x.GetDataType()); } @@ -131,6 +144,7 @@ But the output only shares the LoD information with the input $X$. protected: virtual std::string GetName() const = 0; + virtual std::string GetEquation() const = 0; }; @@ -139,7 +153,7 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; using Tensor = framework::Tensor; - void InferShape(framework::InferShapeContext* ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), @@ -165,7 +179,7 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel { } framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { + const framework::ExecutionContext &ctx) const override { auto input_data_type = framework::ToDataType( ctx.Input(framework::GradVarName("Out"))->type()); @@ -187,7 +201,7 @@ class ElementwiseOpExplicitGrad : public ElementwiseOpGrad { using operators::ElementwiseOpGrad::GetExpectedKernelType; using Tensor = framework::Tensor; - void InferShape(framework::InferShapeContext* ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); @@ -209,11 +223,11 @@ class ElementwiseOpExplicitGrad : public ElementwiseOpGrad { template class ElemwiseGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& context) const override { - auto* dx = + void Compute(const framework::ExecutionContext &context) const override { + auto *dx = context.Output(framework::GradVarName("X")); if (dx != nullptr) { - auto& dout = + auto &dout = *context.Input(framework::GradVarName("Out")); dx->set_lod(dout.lod()); } @@ -234,7 +248,7 @@ class ElemwiseGradKernel : public framework::OpKernel { \ protected: \ std::unique_ptr Apply() const override { \ - auto* op = new paddle::framework::OpDesc(); \ + auto *op = new paddle::framework::OpDesc(); \ op->SetType(#kernel_type "_grad"); \ op->SetInput("Y", Input("Y")); \ op->SetInput(::paddle::framework::GradVarName("Out"), \ diff --git a/paddle/fluid/operators/elementwise_sub_op.h b/paddle/fluid/operators/elementwise_sub_op.h index 3385df0897700d37d60d8804a01db777ebc02a7e..7204c43464e0b81126148b86f64a36b0e299368b 100644 --- a/paddle/fluid/operators/elementwise_sub_op.h +++ b/paddle/fluid/operators/elementwise_sub_op.h @@ -28,11 +28,10 @@ template class ElementwiseSubKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - using Tensor = framework::Tensor; + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* z = ctx.Output("Out"); - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); - auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = 
ctx.Attr("axis"); ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, diff --git a/paddle/fluid/operators/extract_rows_op.cc b/paddle/fluid/operators/extract_rows_op.cc deleted file mode 100644 index 3acae3bcdf4a509ab6e7e19f21c4b2ec4d72b7d7..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/extract_rows_op.cc +++ /dev/null @@ -1,103 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include -#include -#include "paddle/fluid/framework/op_registry.h" - -namespace paddle { -namespace operators { - -class ExtractRowsOpInferShape : public framework::InferShapeBase { - public: - void operator()(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of ExtractRowsOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of ExtractRowsOp should not be null."); - PADDLE_ENFORCE_EQ(ctx->GetInputsVarType("X")[0], - framework::proto::VarType::SELECTED_ROWS, - "The type of input(X) must be SelectedRows."); - auto in_dims = ctx->GetInputDim("X"); - - ctx->SetOutputDim( - "Out", framework::make_ddim(std::vector{in_dims[0], 1})); - } -}; - -class ExtractRowsOp : public framework::OperatorBase { - public: - ExtractRowsOp(const std::string &type, - const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : framework::OperatorBase(type, inputs, outputs, attrs) {} - - private: - void RunImpl(const framework::Scope &scope, - const platform::Place &place) const override { - auto &in = scope.FindVar(Input("X"))->Get(); - auto out = scope.FindVar(Output("Out"))->GetMutable(); - - auto &in_rows = in.rows(); - auto out_dim = framework::make_ddim( - std::vector{static_cast(in_rows.size()), 1}); - auto dst_ptr = out->mutable_data(out_dim, in.place()); - - if (paddle::platform::is_gpu_place(in.place())) { -#ifdef PADDLE_WITH_CUDA - platform::DeviceContextPool &pool = - platform::DeviceContextPool::Instance(); - auto *dev_ctx = pool.Get(in.place()); - auto src_ptr = in_rows.Data(in.place()); - auto stream = - reinterpret_cast(*dev_ctx) - .stream(); - memory::Copy(boost::get(out->place()), dst_ptr, - boost::get(in.place()), src_ptr, - in_rows.size() * sizeof(int64_t), stream); -#else - PADDLE_THROW("Not compiled with CUDA."); -#endif - } else { - memory::Copy(platform::CPUPlace(), dst_ptr, platform::CPUPlace(), - in_rows.data(), in_rows.size() * sizeof(int64_t)); - } - } -}; - -class ExtractRowsOpMaker : public framework::OpProtoAndCheckerMaker { - public: - void Make() override { - AddInput("X", - "(SelectedRows). The input tensor of extract_rows operator," - " and its type is SelectedRows."); - AddOutput("Out", "(Tensor). The the rows of input(X)."); - - AddComment(R"DOC( - ExtractRows Operator. - -The function of extract_rows_op is extracting the rows from the input(X) -whose type is SelectedRows. 
- - )DOC"); - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OPERATOR(extract_rows, ops::ExtractRowsOp, ops::ExtractRowsOpMaker, - ops::ExtractRowsOpInferShape); diff --git a/paddle/fluid/operators/math/cos_sim_functor.cu b/paddle/fluid/operators/math/cos_sim_functor.cu index 4e6ff5ee0a449b42762748ba1a103876beee01f2..537c7e47155fe9a12196869ceaed84fca198335b 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.cu +++ b/paddle/fluid/operators/math/cos_sim_functor.cu @@ -51,7 +51,7 @@ struct CosSimDyFunctor { T* dy) const { const int block_size = 512; dim3 threads(block_size, 1); - dim3 grid(1, (rows + block_size - 1) / block_size); + dim3 grid((rows + block_size - 1) / block_size, 1); CosSimDyKernel<<>>( x_norm, y_norm, x, y, z, dz, rows, cols, dy); } diff --git a/paddle/fluid/operators/math/fc_compute.h b/paddle/fluid/operators/math/fc_compute.h index 87220d4019fc9337fb8355172ca4f1372cfd4558..b072b4c20a171d148bd892c162436d03da404fb9 100644 --- a/paddle/fluid/operators/math/fc_compute.h +++ b/paddle/fluid/operators/math/fc_compute.h @@ -36,7 +36,7 @@ inline void FCCompute(const BlasT& blas, const int M, .template Get>(N); for (int i = 0; i < M; i++) { T* dst = Y + i * N; - vaddrelu->Compute(B, dst, dst); + vaddrelu->Compute(B, dst, dst, N); } } else { const auto& vadd = jitkernel::KernelPool::Instance() @@ -47,7 +47,7 @@ inline void FCCompute(const BlasT& blas, const int M, #endif for (int i = 0; i < M; i++) { T* dst = Y + i * N; - vadd->Compute(B, dst, dst); + vadd->Compute(B, dst, dst, N); } } } diff --git a/paddle/fluid/operators/math/jit_code.cc b/paddle/fluid/operators/math/jit_code.cc index 9e2cc18c7a5e396be40b2336382f68a17f8a2bf9..6b3eecfbd11471b5d95dcb10c91acc536f78cb85 100644 --- a/paddle/fluid/operators/math/jit_code.cc +++ b/paddle/fluid/operators/math/jit_code.cc @@ -24,43 +24,95 @@ namespace gen { using namespace platform::jit; // NOLINT -bool VMulJitCode::init(int d) { +bool VXXJitCode::init(int d, int scalar_index) { // It's not necessary to use avx512 since it would slow down the frequency // and this kernel is not compute bound. 
- return MayIUse(avx); + return MayIUse(avx) && scalar_index >= 0 && scalar_index <= 2; } -void VMulJitCode::generate() { +void VXXJitCode::generate() { // do not need push stack, and do not need save avx512reg if do not use avx512 int offset = 0; + if (with_relu_) { + vxorps(ymm_zero, ymm_zero, ymm_zero); + } + if (scalar_index_ == 1) { + vbroadcastss(ymm_src1, ptr[param1]); + } else if (scalar_index_ == 2) { + vbroadcastss(ymm_src2, ptr[param2]); + } for (int i = 0; i < num_ / AVX_FLOAT_BLOCK; ++i) { - vmovups(ymm_src1, ptr[param1 + offset]); - vmovups(ymm_src2, ptr[param2 + offset]); - vmulps(ymm_dst, ymm_src1, ymm_src2); + if (scalar_index_ != 1) { + vmovups(ymm_src1, ptr[param1 + offset]); + } + if (scalar_index_ != 2) { + vmovups(ymm_src2, ptr[param2 + offset]); + } + if (type_ == operand_type::mul) { + vmulps(ymm_dst, ymm_src1, ymm_src2); + } else if (type_ == operand_type::add) { + vaddps(ymm_dst, ymm_src1, ymm_src2); + } + if (with_relu_) { + vmaxps(ymm_dst, ymm_zero, ymm_dst); + } vmovups(ptr[param3 + offset], ymm_dst); offset += sizeof(float) * AVX_FLOAT_BLOCK; } int rest = num_ % AVX_FLOAT_BLOCK; if (rest >= 4) { - vmovups(xmm_src1, ptr[param1 + offset]); - vmovups(xmm_src2, ptr[param2 + offset]); - vmulps(xmm_dst, xmm_src1, xmm_src2); + if (scalar_index_ != 1) { + vmovups(xmm_src1, ptr[param1 + offset]); + } + if (scalar_index_ != 2) { + vmovups(xmm_src2, ptr[param2 + offset]); + } + if (type_ == operand_type::mul) { + vmulps(xmm_dst, xmm_src1, xmm_src2); + } else if (type_ == operand_type::add) { + vaddps(xmm_dst, xmm_src1, xmm_src2); + } + if (with_relu_) { + vmaxps(xmm_dst, xmm_zero, xmm_dst); + } vmovups(ptr[param3 + offset], xmm_dst); offset += sizeof(float) * 4; rest -= 4; } if (rest >= 2) { - vmovq(xmm_src1, ptr[param1 + offset]); - vmovq(xmm_src2, ptr[param2 + offset]); - vmulps(xmm_dst, xmm_src1, xmm_src2); + if (scalar_index_ != 1) { + vmovups(xmm_src1, ptr[param1 + offset]); + } + if (scalar_index_ != 2) { + vmovups(xmm_src2, ptr[param2 + offset]); + } + if (type_ == operand_type::mul) { + vmulps(xmm_dst, xmm_src1, xmm_src2); + } else if (type_ == operand_type::add) { + vaddps(xmm_dst, xmm_src1, xmm_src2); + } + if (with_relu_) { + vmaxps(xmm_dst, xmm_zero, xmm_dst); + } vmovq(ptr[param3 + offset], xmm_dst); offset += sizeof(float) * 2; rest -= 2; } if (rest > 0) { - vmovss(xmm_src1, ptr[param1 + offset]); - vmovss(xmm_src2, ptr[param2 + offset]); - vmulss(xmm_dst, xmm_src1, xmm_src2); + if (scalar_index_ != 1) { + vmovups(xmm_src1, ptr[param1 + offset]); + } + if (scalar_index_ != 2) { + vmovups(xmm_src2, ptr[param2 + offset]); + } + if (type_ == operand_type::mul) { + vmulss(xmm_dst, xmm_src1, xmm_src2); + } else if (type_ == operand_type::add) { + vaddss(xmm_dst, xmm_src1, xmm_src2); + } + if (with_relu_) { + vmaxps(xmm_dst, xmm_zero, xmm_dst); + } vmovss(ptr[param3 + offset], xmm_dst); } ret(); diff --git a/paddle/fluid/operators/math/jit_code.h b/paddle/fluid/operators/math/jit_code.h index 6007b290815de0ceaa2226962c5273ae7da72e7e..aaedb0ae10323eeddfba9512d9e47c7a22320610 100644 --- a/paddle/fluid/operators/math/jit_code.h +++ b/paddle/fluid/operators/math/jit_code.h @@ -14,8 +14,8 @@ limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/operators/math/jit_gen.h" - namespace paddle { namespace operators { namespace math { @@ -29,17 +29,47 @@ using ymm_t = const Xbyak::Ymm; using zmm_t = const Xbyak::Zmm; using Label = Xbyak::Label; -class VMulJitCode : public JitCode { +typedef enum { mul = 0, add } operand_type; + +// function: vec = Operand(vec(or scalar), vec(or scalar)) (maybe with relu) +class VXXJitCode : public JitCode { public: - DECLARE_JIT_CODE(VMulJitCode); - explicit VMulJitCode(int d, size_t code_size = 256 * 1024, - void* code_ptr = nullptr) - : JitCode(code_size, code_ptr), num_(d) {} - static bool init(int d); + const char* name() const override { + std::string base = "VXXJitCode"; + if (scalar_index_ == 1) { + base += "_Scalar"; + } else { + base += "_Vec"; + } + if (type_ == operand_type::mul) { + base += "_Mul"; + } else if (type_ == operand_type::add) { + base += "_Add"; + } + if (scalar_index_ == 2) { + base += "_Scalar"; + } else { + base += "_Vec"; + } + base += (with_relu_ ? "_Relu" : ""); + return base.c_str(); + } + explicit VXXJitCode(int d, operand_type type, int scalar_index, + bool with_relu, size_t code_size = 256 * 1024, + void* code_ptr = nullptr) + : JitCode(code_size, code_ptr), + num_(d), + type_(type), + scalar_index_(scalar_index), + with_relu_(with_relu) {} + static bool init(int d, int scalar_index = 0); void generate() override; private: int num_; + operand_type type_; + int scalar_index_; + bool with_relu_; reg64_t param1{abi_param1}; reg64_t param2{abi_param2}; reg64_t param3{abi_param3}; @@ -47,10 +77,12 @@ class VMulJitCode : public JitCode { xmm_t xmm_src1 = xmm_t(0); xmm_t xmm_src2 = xmm_t(1); xmm_t xmm_dst = xmm_t(2); + xmm_t xmm_zero = xmm_t(3); ymm_t ymm_src1 = ymm_t(0); ymm_t ymm_src2 = ymm_t(1); ymm_t ymm_dst = ymm_t(2); + ymm_t ymm_zero = ymm_t(3); }; } // namespace gen diff --git a/paddle/fluid/operators/math/jit_kernel.h b/paddle/fluid/operators/math/jit_kernel.h index 7b6027aa267803ff8ff830deabda536b1b27fec8..e9b259282cd00cc2afc46634423ec09590bf5dd3 100644 --- a/paddle/fluid/operators/math/jit_kernel.h +++ b/paddle/fluid/operators/math/jit_kernel.h @@ -71,26 +71,27 @@ class VMulKernel : public Kernel { template class VAddKernel : public Kernel { public: - virtual void Compute(const T *x, const T *y, T *z) const = 0; + void (*Compute)(const T *, const T *, T *, int); }; template -class VScalKernel : public Kernel { +class VAddReluKernel : public Kernel { public: - virtual void Compute(const T a, const T *x, T *y) const = 0; - virtual void Compute(const T a, T *x) const = 0; + void (*Compute)(const T *, const T *, T *, int); }; template -class VAddBiasKernel : public Kernel { +class VScalKernel : public Kernel { public: - virtual void Compute(const T a, const T *x, T *y) const = 0; + // y = a.*x + void (*Compute)(const T *, const T *, T *, int); }; template -class VAddReluKernel : public Kernel { +class VAddBiasKernel : public Kernel { public: - virtual void Compute(const T *x, const T *y, T *z) const = 0; + // y = a.+x + void (*Compute)(const T *, const T *, T *, int); }; template diff --git a/paddle/fluid/operators/math/jit_kernel_blas.cc b/paddle/fluid/operators/math/jit_kernel_blas.cc index 8a988f8f482e4a4963f70c39bccd89387c1e0059..c4bfbcf925a2bbdc39f8468049c58e126d3eba1b 100644 --- a/paddle/fluid/operators/math/jit_kernel_blas.cc +++ b/paddle/fluid/operators/math/jit_kernel_blas.cc @@ -42,6 +42,35 @@ void VMulRefer(const T* x, const T* y, T* z, int n) { } } +template +void VAddRefer(const T* x, const T* y, T* z, int 
n) { + for (int i = 0; i < n; ++i) { + z[i] = x[i] + y[i]; + } +} + +template +void VAddReluRefer(const T* x, const T* y, T* z, int n) { + for (int i = 0; i < n; ++i) { + z[i] = x[i] + y[i]; + z[i] = z[i] > 0 ? z[i] : 0; + } +} + +template +void VScalRefer(const T* a, const T* x, T* y, int n) { + for (int i = 0; i < n; ++i) { + y[i] = a[0] * x[i]; + } +} + +template +void VAddBiasRefer(const T* a, const T* x, T* y, int n) { + for (int i = 0; i < n; ++i) { + y[i] = a[0] + x[i]; + } +} + #ifdef PADDLE_WITH_MKLML template void VMulMKL(const T* x, const T* y, T* z, int n); @@ -50,28 +79,67 @@ template <> void VMulMKL(const float* x, const float* y, float* z, int n) { platform::dynload::vsMul(n, x, y, z); } + template <> void VMulMKL(const double* x, const double* y, double* z, int n) { platform::dynload::vdMul(n, x, y, z); } + +template +void VAddMKL(const T* x, const T* y, T* z, int n); + +template <> +void VAddMKL(const float* x, const float* y, float* z, int n) { + platform::dynload::vsAdd(n, x, y, z); +} + +template <> +void VAddMKL(const double* x, const double* y, double* z, int n) { + platform::dynload::vdAdd(n, x, y, z); +} + +template +void VScalMKL(const T* a, const T* x, T* y, int n); + +template <> +void VScalMKL(const float* a, const float* x, float* y, int n) { + if (x == y) { + platform::dynload::cblas_sscal(n, *a, y, 1); + } else { + VScalRefer(a, x, y, n); + } +} + +template <> +void VScalMKL(const double* a, const double* x, double* y, int n) { + if (x == y) { + platform::dynload::cblas_dscal(n, *a, y, 1); + } else { + VScalRefer(a, x, y, n); + } +} + #endif +#define DECLARE_STATIC_FUNC \ + static inline std::string name(int d) { \ + PADDLE_THROW("DType should be either float or double"); \ + } \ + static inline bool useJIT(int d) { return false; } \ + static inline bool useMKL(int d) { return false; } + /* VMUL JitKernel */ template class VMulKernelImpl : public VMulKernel { public: - static inline std::string name(int d) { - PADDLE_THROW("DType should be either float or double"); - } - static inline bool useJIT(int d) { return false; } - static inline bool useMKL(int d) { return false; } - + DECLARE_STATIC_FUNC; explicit VMulKernelImpl(int d) : VMulKernel() { #ifdef PADDLE_WITH_XBYAK if (useJIT(d)) { // roughly estimate the size of code size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; - jitcode_.reset(new gen::VMulJitCode(d, sz > 4096 ? sz : 4096)); + jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::mul, 0, false, + sz > 4096 ? 
sz : 4096)); this->Compute = jitcode_->getCode(); return; @@ -89,14 +157,14 @@ class VMulKernelImpl : public VMulKernel { #ifdef PADDLE_WITH_XBYAK private: - std::unique_ptr jitcode_{nullptr}; + std::unique_ptr jitcode_{nullptr}; #endif }; #ifdef PADDLE_WITH_XBYAK template <> bool VMulKernelImpl::useJIT(int d) { - return gen::VMulJitCode::init(d); + return gen::VXXJitCode::init(d); } #endif @@ -112,189 +180,177 @@ bool VMulKernelImpl::useMKL(int d) { } #endif -REGISTER_JITKERNEL(vmul, VMulKernel); - -/* VADD JitKernel */ -template +/* VAdd JitKernel */ +template class VAddKernelImpl : public VAddKernel { public: - explicit VAddKernelImpl(int d) : VAddKernel() { this->num_ = d; } - void Compute(const T* x, const T* y, T* z) const override { - for (int i = 0; i < this->num_; ++i) { - z[i] = x[i] + y[i]; + DECLARE_STATIC_FUNC; + explicit VAddKernelImpl(int d) : VAddKernel() { +#ifdef PADDLE_WITH_XBYAK + if (useJIT(d)) { + size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::add, 0, false, + sz > 4096 ? sz : 4096)); + this->Compute = + jitcode_->getCode(); + return; } - } -}; - +#endif #ifdef PADDLE_WITH_MKLML -#define MKL_FLOAT(isa, block) \ - template <> \ - void VAddKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - platform::dynload::vsAdd(this->num_, x, y, z); \ + if (useMKL(d)) { + this->Compute = VAddMKL; + return; + } +#endif + this->Compute = VAddRefer; } +#ifdef PADDLE_WITH_XBYAK -#define MKL_DOUBLE(isa, block) \ - template <> \ - void VAddKernelImpl::Compute( \ - const double* x, const double* y, double* z) const { \ - platform::dynload::vdAdd(this->num_, x, y, z); \ - } + private: + std::unique_ptr jitcode_{nullptr}; +#endif +}; -FOR_EACH_ISA(MKL_FLOAT, kGT16); -FOR_EACH_ISA_BLOCK(MKL_DOUBLE); +#ifdef PADDLE_WITH_XBYAK +template <> +bool VAddKernelImpl::useJIT(int d) { + return gen::VXXJitCode::init(d); +} #endif -#define INTRI8_FLOAT(isa) \ - template <> \ - void VAddKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 tmpx, tmpy; \ - tmpx = _mm256_loadu_ps(x); \ - tmpy = _mm256_loadu_ps(y); \ - tmpx = _mm256_add_ps(tmpx, tmpy); \ - _mm256_storeu_ps(z, tmpx); \ - } -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx); +#ifdef PADDLE_WITH_MKLML +template <> +bool VAddKernelImpl::useMKL(int d) { + return d > 512; +} + +template <> +bool VAddKernelImpl::useMKL(int d) { + return true; +} #endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2); + +/* VAddRelu JitKernel */ +template +class VAddReluKernelImpl : public VAddReluKernel { + public: + DECLARE_STATIC_FUNC; + explicit VAddReluKernelImpl(int d) : VAddReluKernel() { +#ifdef PADDLE_WITH_XBYAK + if (useJIT(d)) { + size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::add, 0, true, + sz > 4096 ? 
sz : 4096)); + this->Compute = + jitcode_->getCode(); + return; + } #endif -#ifdef __AVX512F__ -INTRI8_FLOAT(jit::avx512f); + this->Compute = VAddReluRefer; + } +#ifdef PADDLE_WITH_XBYAK + + private: + std::unique_ptr jitcode_{nullptr}; #endif -// TODO(TJ): eq16 test and complete avx512 +}; -#undef INTRI8_FLOAT -#undef MKL_FLOAT -#undef MKL_DOUBLE +#ifdef PADDLE_WITH_XBYAK +template <> +bool VAddReluKernelImpl::useJIT(int d) { + return gen::VXXJitCode::init(d); +} +#endif -/* VSCAL JitKernel */ -template +/* VScal JitKernel */ +template class VScalKernelImpl : public VScalKernel { public: - explicit VScalKernelImpl(int d) : VScalKernel() { this->num_ = d; } - void Compute(const T a, const T* x, T* y) const override { - for (int i = 0; i < this->num_; ++i) { - y[i] = a * x[i]; - } - } - void Compute(const T a, T* x) const override { - for (int i = 0; i < this->num_; ++i) { - x[i] = a * x[i]; + DECLARE_STATIC_FUNC; + explicit VScalKernelImpl(int d) : VScalKernel() { +#ifdef PADDLE_WITH_XBYAK + if (useJIT(d)) { + size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::mul, 1, false, + sz > 4096 ? sz : 4096)); + this->Compute = + jitcode_->getCode(); + return; } - } -}; - +#endif #ifdef PADDLE_WITH_MKLML -#define MKL_FLOAT(isa, block) \ - template <> \ - void VScalKernelImpl::Compute(const float a, float* x) \ - const { \ - platform::dynload::cblas_sscal(this->num_, a, x, 1); \ - } - -#define MKL_DOUBLE(isa, block) \ - template <> \ - void VScalKernelImpl::Compute(const double a, double* x) \ - const { \ - platform::dynload::cblas_dscal(this->num_, a, x, 1); \ - } - -FOR_EACH_ISA(MKL_FLOAT, kGT16); -FOR_EACH_ISA_BLOCK(MKL_DOUBLE); + if (useMKL(d)) { + this->Compute = VScalMKL; + return; + } #endif - -#define INTRI8_FLOAT(isa) \ - template <> \ - void VScalKernelImpl::Compute( \ - const float a, const float* x, float* y) const { \ - __m256 tmp; \ - __m256 scalar = _mm256_set1_ps(a); \ - tmp = _mm256_loadu_ps(x); \ - tmp = _mm256_mul_ps(tmp, scalar); \ - _mm256_storeu_ps(y, tmp); \ - } -#define INTRI8_INPLACE_FLOAT(isa) \ - template <> \ - void VScalKernelImpl::Compute(const float a, float* x) \ - const { \ - __m256 tmp; \ - __m256 scalar = _mm256_set1_ps(a); \ - tmp = _mm256_loadu_ps(x); \ - tmp = _mm256_mul_ps(tmp, scalar); \ - _mm256_storeu_ps(x, tmp); \ + this->Compute = VScalRefer; } +#ifdef PADDLE_WITH_XBYAK -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx); -INTRI8_INPLACE_FLOAT(jit::avx); -#endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2); -INTRI8_INPLACE_FLOAT(jit::avx2); + private: + std::unique_ptr jitcode_{nullptr}; #endif -#ifdef __AVX512F__ -INTRI8_FLOAT(jit::avx512f); -INTRI8_INPLACE_FLOAT(jit::avx512f); +}; + +#ifdef PADDLE_WITH_XBYAK +template <> +bool VScalKernelImpl::useJIT(int d) { + return gen::VXXJitCode::init(d, 1); +} #endif -// TODO(TJ): eq16 test and complete avx512 -#undef INTRI8_FLOAT -#undef INTRI8_INPLACE_FLOAT -#undef MKL_FLOAT -#undef MKL_DOUBLE +#ifdef PADDLE_WITH_MKLML +template <> +bool VScalKernelImpl::useMKL(int d) { + return d > 512; +} +template <> +bool VScalKernelImpl::useMKL(int d) { + return true; +} +#endif /* VAddBias JitKernel */ -template +template class VAddBiasKernelImpl : public VAddBiasKernel { public: - explicit VAddBiasKernelImpl(int d) : VAddBiasKernel() { this->num_ = d; } - void Compute(const T a, const T* x, T* y) const override { - for (int i = 0; i < this->num_; ++i) { - y[i] = x[i] + a; + DECLARE_STATIC_FUNC; + explicit VAddBiasKernelImpl(int d) : VAddBiasKernel() { +#ifdef PADDLE_WITH_XBYAK + if 
(useJIT(d)) { + size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + jitcode_.reset(new gen::VXXJitCode(d, gen::operand_type::add, 1, false, + sz > 4096 ? sz : 4096)); + this->Compute = + jitcode_->getCode(); + return; } - } -}; - -#define INTRI8_FLOAT(isa) \ - template <> \ - void VAddBiasKernelImpl::Compute( \ - const float a, const float* x, float* y) const { \ - __m256 tmp = _mm256_loadu_ps(x); \ - tmp = _mm256_add_ps(tmp, _mm256_set1_ps(a)); \ - _mm256_storeu_ps(y, tmp); \ - } +#endif -#define INTRI16_FLOAT(isa) \ - template <> \ - void VAddBiasKernelImpl::Compute( \ - const float a, const float* x, float* y) const { \ - __m256 tmp0 = _mm256_loadu_ps(x); \ - __m256 tmp1 = _mm256_loadu_ps(x + 8); \ - tmp0 = _mm256_add_ps(tmp0, _mm256_set1_ps(a)); \ - tmp1 = _mm256_add_ps(tmp1, _mm256_set1_ps(a)); \ - _mm256_storeu_ps(y, tmp0); \ - _mm256_storeu_ps(y + 8, tmp1); \ + this->Compute = VAddBiasRefer; } +#ifdef PADDLE_WITH_XBYAK -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx); -INTRI16_FLOAT(jit::avx); -#endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2); -INTRI16_FLOAT(jit::avx2); + private: + std::unique_ptr jitcode_{nullptr}; #endif -#ifdef __AVX512F__ -INTRI8_FLOAT(jit::avx512f); -INTRI16_FLOAT(jit::avx512f); +}; + +#ifdef PADDLE_WITH_XBYAK +template <> +bool VAddBiasKernelImpl::useJIT(int d) { + return gen::VXXJitCode::init(d, 1); +} #endif -// TODO(TJ): eq16 test and complete avx512 -#undef INTRI8_FLOAT -#undef INTRI16_FLOAT +#undef DECLARE_STATIC_FUNC + +REGISTER_JITKERNEL(vmul, VMulKernel); +REGISTER_JITKERNEL(vadd, VAddKernel); +REGISTER_JITKERNEL(vaddrelu, VAddReluKernel); +REGISTER_JITKERNEL(vscal, VScalKernel); +REGISTER_JITKERNEL(vaddbias, VAddBiasKernel); /* VRelu JitKernel */ template @@ -405,98 +461,7 @@ class VIdentityKernelImpl : public VIdentityKernel { void Compute(const T* x, T* y) const override {} }; -/* VAddRelu JitKernel */ -template -class VAddReluKernelImpl : public VAddReluKernel { - public: - explicit VAddReluKernelImpl(int d) : VAddReluKernel() { this->num_ = d; } - void Compute(const T* x, const T* y, T* z) const override { - for (int i = 0; i < this->num_; ++i) { - z[i] = x[i] + y[i]; - z[i] = z[i] > 0 ? 
z[i] : 0; - } - } -}; - -#define INTRI8_FLOAT(isa) \ - template <> \ - void VAddReluKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 tmpx = _mm256_loadu_ps(x); \ - __m256 tmpy = _mm256_loadu_ps(y); \ - tmpy = _mm256_add_ps(tmpx, tmpy); \ - tmpy = _mm256_max_ps(tmpy, _mm256_setzero_ps()); \ - _mm256_storeu_ps(z, tmpy); \ - } - -#define INTRI16_FLOAT(isa) \ - template <> \ - void VAddReluKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 zeros = _mm256_setzero_ps(); \ - __m256 tmp0 = _mm256_loadu_ps(x); \ - __m256 tmp1 = _mm256_loadu_ps(y); \ - tmp0 = _mm256_add_ps(tmp0, tmp1); \ - tmp0 = _mm256_max_ps(tmp0, zeros); \ - tmp1 = _mm256_loadu_ps(x + 8); \ - __m256 tmp2 = _mm256_loadu_ps(y + 8); \ - tmp1 = _mm256_add_ps(tmp1, tmp2); \ - tmp1 = _mm256_max_ps(tmp1, zeros); \ - _mm256_storeu_ps(z, tmp0); \ - _mm256_storeu_ps(z + 8, tmp1); \ - } - -#define INTRI_COMMON_FLOAT(isa, block) \ - template <> \ - VAddReluKernelImpl::VAddReluKernelImpl(int d) \ - : VAddReluKernel() { \ - this->num_ = d; \ - this->end_ = d - d % AVX_FLOAT_BLOCK; \ - this->rest_ = d - this->end_; \ - } \ - template <> \ - void VAddReluKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 zeros = _mm256_setzero_ps(); \ - for (int i = 0; i < this->end_; i += AVX_FLOAT_BLOCK) { \ - __m256 tmpx = _mm256_loadu_ps(x + i); \ - __m256 tmpy = _mm256_loadu_ps(y + i); \ - tmpy = _mm256_add_ps(tmpx, tmpy); \ - tmpy = _mm256_max_ps(tmpy, zeros); \ - _mm256_storeu_ps(z + i, tmpy); \ - } \ - for (int i = this->end_; i < this->num_; ++i) { \ - z[i] = x[i] + y[i]; \ - z[i] = z[i] > 0 ? z[i] : 0; \ - } \ - } - -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx); -INTRI16_FLOAT(jit::avx); -INTRI_COMMON_FLOAT(jit::avx, kGT16); -#endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2); -INTRI16_FLOAT(jit::avx2); -INTRI_COMMON_FLOAT(jit::avx2, kGT16); -#endif -#ifdef __AVX512F__ -// TODO(TJ): refine avx512 -INTRI8_FLOAT(jit::avx512f); -INTRI16_FLOAT(jit::avx512f); -INTRI_COMMON_FLOAT(jit::avx512f, kGT16); -#endif - -#undef INTRI8_FLOAT -#undef INTRI16_FLOAT -#undef INTRI_COMMON_FLOAT - -REGISTER_JITKERNEL_DEPRECATED(vadd, VAddKernel); -REGISTER_JITKERNEL_DEPRECATED(vscal, VScalKernel); -REGISTER_JITKERNEL_DEPRECATED(vaddb, VAddBiasKernel); REGISTER_JITKERNEL_DEPRECATED(vrelu, VReluKernel); -REGISTER_JITKERNEL_DEPRECATED(vaddrelu, VAddReluKernel); REGISTER_JITKERNEL_DEPRECATED(videntity, VIdentityKernel); } // namespace jitkernel diff --git a/paddle/fluid/operators/math/jit_kernel_exp.cc b/paddle/fluid/operators/math/jit_kernel_exp.cc index d7c177e6782e19e199542e10e1d62587ee0df4cf..c55e54a13f539014c0f582436ca1a105d0b0fedd 100644 --- a/paddle/fluid/operators/math/jit_kernel_exp.cc +++ b/paddle/fluid/operators/math/jit_kernel_exp.cc @@ -409,10 +409,11 @@ class VTanhKernelImpl : public VTanhKernel { vaddbias_ = KernelPool::Instance().template Get>(d); } void Compute(const T* x, T* y) const override { - vscal_->Compute(static_cast(2), x, y); + const T a = static_cast(2), b = static_cast(-1); + vscal_->Compute(&a, x, y, this->num_); vsigmoid_->Compute(y, y); - vscal_->Compute(static_cast(2), y); - vaddbias_->Compute(static_cast(-1), y, y); + vscal_->Compute(&a, y, y, this->num_); + vaddbias_->Compute(&b, y, y, this->num_); } private: @@ -472,10 +473,11 @@ class VTanhKernelImpl : public VTanhKernel { _mm256_storeu_ps(y, tmp); \ x += AVX_FLOAT_BLOCK; \ y += AVX_FLOAT_BLOCK; \ - vscal_->Compute(2.f, x, y); \ + const float a = 2.f, b = -1.f; \ + vscal_->Compute(&a, x, y, 
this->num_); \ vsigmoid_->Compute(y, y); \ - vscal_->Compute(2.f, y); \ - vaddbias_->Compute(-1.f, y, y); \ + vscal_->Compute(&a, y, y, this->num_); \ + vaddbias_->Compute(&b, y, y, this->num_); \ } #define INTRI_GT16_FLOAT(isa, expisa) \ @@ -502,10 +504,11 @@ class VTanhKernelImpl : public VTanhKernel { } \ x += this->end_; \ y += this->end_; \ - vscal_->Compute(2.f, x, y); \ + const float a = 2.f, b = -1.f; \ + vscal_->Compute(&a, x, y, this->num_); \ vsigmoid_->Compute(y, y); \ - vscal_->Compute(2.f, y); \ - vaddbias_->Compute(-1.f, y, y); \ + vscal_->Compute(&a, y, y, this->num_); \ + vaddbias_->Compute(&b, y, y, this->num_); \ } #ifdef __AVX__ diff --git a/paddle/fluid/operators/math/jit_kernel_rnn.cc b/paddle/fluid/operators/math/jit_kernel_rnn.cc index d0932a37bb85bbc41f662a106c8ef5693a72efeb..ba3e917377cf12192a068a9d71238442e12d5e5e 100644 --- a/paddle/fluid/operators/math/jit_kernel_rnn.cc +++ b/paddle/fluid/operators/math/jit_kernel_rnn.cc @@ -181,7 +181,7 @@ class LSTMKernelImpl : public LSTMKernel { act_cand_d_->Compute(gates, gates); vmul_d_->Compute(gates, gates + d_, gates + d_, d_); vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_); - vadd_d_->Compute(gates + d_, gates + d2_, ct); + vadd_d_->Compute(gates + d_, gates + d2_, ct, d_); /* H_t = act_cell(C_t) * ogated */ act_cell_d_->Compute(ct, gates + d2_); @@ -291,16 +291,16 @@ class PeepholeKernelImpl : public LSTMKernel { /* get fgated and igated*/ vmul_d_->Compute(wp_data, ct_1, checked, d_); vmul_d_->Compute(wp_data + d_, ct_1, checked + d_, d_); - vadd_d2_->Compute(checked, gates + d_, gates + d_); + vadd_d2_->Compute(checked, gates + d_, gates + d_, d2_); act_gate_d2_->Compute(gates + d_, gates + d_); /* C_t = C_t-1 * fgated + cand_gated * igated*/ act_cand_d_->Compute(gates, gates); vmul_d_->Compute(gates, gates + d_, gates + d_, d_); vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_); - vadd_d_->Compute(gates + d_, gates + d2_, ct); + vadd_d_->Compute(gates + d_, gates + d2_, ct, d_); /* get ogated*/ vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_); - vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_); + vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_, d_); act_gate_d_->Compute(gates + d3_, gates + d3_); /* H_t = act_cell(C_t) * ogated */ act_cell_d_->Compute(ct, gates + d2_); @@ -314,7 +314,7 @@ class PeepholeKernelImpl : public LSTMKernel { vmul_d_->Compute(gates, gates + d_, ct, d_); /* get outgated, put W_oc * C_t on igated */ vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_); - vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_); + vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_, d_); /* H_t = act_cell(C_t) * ogated */ act_gate_d_->Compute(gates + d3_, gates + d3_); act_cell_d_->Compute(ct, gates + d2_); diff --git a/paddle/fluid/operators/math/jit_kernel_test.cc b/paddle/fluid/operators/math/jit_kernel_test.cc index 667a95fe1a247cf9d3d63dae74f7e0fa9c2309ca..596bd3b2d324131c30fce7439460226574f0a190 100644 --- a/paddle/fluid/operators/math/jit_kernel_test.cc +++ b/paddle/fluid/operators/math/jit_kernel_test.cc @@ -128,7 +128,7 @@ TEST(JitKernel, vaddbias) { auto trefe = GetCurrentUS(); auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->Compute(a, x_data, ztgt_data); + ker->Compute(&a, x_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); @@ -281,10 +281,11 @@ void vtanh_better( const paddle::operators::math::jitkernel::VAddBiasKernel>& vaddbias, const int n, const float* x, float* y) { - vscal->Compute(2.f, x, y); + const float a = 2.f, b = -1.f; + 
vscal->Compute(&a, x, y, n); vsigmoid->Compute(y, y); - vscal->Compute(2.f, y); - vaddbias->Compute(-1.f, y, y); + vscal->Compute(&a, y, y, n); + vaddbias->Compute(&b, y, y, n); } TEST(JitKernel, vtanh) { @@ -371,7 +372,7 @@ void lstm_ctht_better( vtanh_d->Compute(gates, gates); vmul_d->Compute(gates, gates + d, gates + d, d); vmul_d->Compute(ct_1, gates + d2, gates + d2, d); - vadd_d->Compute(gates + d, gates + d2, ct); + vadd_d->Compute(gates + d, gates + d2, ct, d); /* H_t = act_cell(C_t) * ogated */ vtanh_d->Compute(ct, gates + d2); vmul_d->Compute(gates + d2, gates + d * 3, ht, d); @@ -531,12 +532,12 @@ TEST(JitKernel, vscal) { auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->Compute(a, x_data, ztgt_data); + ker->Compute(&a, x_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); auto ttgts1 = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->Compute(a, y_data); + ker->Compute(&a, y_data, y_data, d); } auto ttgte1 = GetCurrentUS(); VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat @@ -695,7 +696,7 @@ TEST(JitKernel, vadd) { auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->Compute(x_data, y_data, ztgt_data); + ker->Compute(x_data, y_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); @@ -723,8 +724,8 @@ void vaddrelu_better( const paddle::operators::math::jitkernel::VAddKernel>& vadd, const std::shared_ptr< const paddle::operators::math::jitkernel::VReluKernel>& vrelu, - const float* x, const float* y, float* z) { - vadd->Compute(x, y, z); + const float* x, const float* y, float* z, int d) { + vadd->Compute(x, y, z, d); vrelu->Compute(z, z); } @@ -752,12 +753,12 @@ TEST(JitKernel, vaddrelu) { auto trefe = GetCurrentUS(); auto tmkls = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - vaddrelu_better(vadd, vrelu, x_data, y_data, zref_data); + vaddrelu_better(vadd, vrelu, x_data, y_data, zref_data, d); } auto tmkle = GetCurrentUS(); auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->Compute(x_data, y_data, ztgt_data); + ker->Compute(x_data, y_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat @@ -801,7 +802,11 @@ TEST(JitKernel, pool) { std::dynamic_pointer_cast(pvmul_d)); const auto& pvmul_from_key = jit::KernelPool::Instance().Get("vmulfjit4"); - EXPECT_EQ(pvmul_f, pvmul_from_key); +#if defined(__APPLE__) || defined(__OSX__) || defined(_WIN32) + EXPECT_EQ(pvmul_from_key, nullptr); +#else + EXPECT_EQ(pvmul_from_key, pvmul_f); +#endif const auto& pvmul_from_key2 = jit::KernelPool::Instance().Get("vmulfjit"); EXPECT_TRUE(pvmul_from_key2 == nullptr); } diff --git a/paddle/fluid/operators/math/selected_rows_functor.cu b/paddle/fluid/operators/math/selected_rows_functor.cu index a4fa6f5c898c541120a874f962b0f6a817736510..c4fccdbf862fda8a599869c30ae598573ca367aa 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cu +++ b/paddle/fluid/operators/math/selected_rows_functor.cu @@ -81,7 +81,7 @@ template __global__ void SelectedRowsAddTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { - const int ty = blockIdx.y; + const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; @@ -123,7 +123,7 @@ struct SelectedRowsAddTensor { const int block_size = 256; dim3 threads(block_size, 1); - dim3 grid(1, in1_rows.size()); + dim3 grid(in1_rows.size(), 1); SelectedRowsAddTensorKernel< T, block_size><<>>( in1_data, in1_rows.CUDAData(context.GetPlace()), 
out_data, @@ -188,7 +188,7 @@ __global__ void SelectedRowsAddToTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { - const int ty = blockIdx.y; + const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; @@ -221,7 +221,7 @@ struct SelectedRowsAddToTensor { auto* in2_data = input2->data(); const int block_size = 256; dim3 threads(block_size, 1); - dim3 grid(1, in1_rows.size()); + dim3 grid(in1_rows.size(), 1); SelectedRowsAddToTensorKernel< T, block_size><<>>( in1_data, in1_rows.CUDAData(context.GetPlace()), in2_data, @@ -388,7 +388,7 @@ template __global__ void UpdateToTensorKernel(const T* selected_rows, const int64_t* rows, const ScatterOps& op, T* tensor_out, int64_t row_numel) { - const int ty = blockIdx.y; + const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; @@ -457,7 +457,7 @@ struct UpdateToTensor { auto* in2_data = input2->data(); dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1); - dim3 grid(1, in1_rows.size()); + dim3 grid(in1_rows.size(), 1); UpdateToTensorKernel<<< grid, threads, 0, context.stream()>>>(in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel); diff --git a/paddle/fluid/operators/math/selected_rows_functor.h b/paddle/fluid/operators/math/selected_rows_functor.h index 521c53dd0d71707c13c4364c5ee59943a03d4a2d..6d146d39d6d07678e859b82b25ba60ed7661546d 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.h +++ b/paddle/fluid/operators/math/selected_rows_functor.h @@ -64,6 +64,8 @@ struct SelectedRowsSumTo { framework::SelectedRows* input2); }; +// FIXME: The result of SelectedRowsAddToTensor maybe non deterministic, +// because it uses CudaAtomicAdd. // input2 = input1 + input2 template struct SelectedRowsAddToTensor { @@ -88,57 +90,6 @@ struct MergeAdd { framework::SelectedRows* output); }; -template -struct Add { - framework::SelectedRows operator()(const DeviceContext& context, - const framework::SelectedRows& input1, - const framework::SelectedRows& input2) { - framework::SelectedRows out; - out.set_rows(input1.rows()); - out.set_height(input1.height()); - out.mutable_value()->mutable_data(input1.value().dims(), - context.GetPlace()); - auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); - auto e_in1 = framework::EigenVector::Flatten(input1.value()); - auto e_in2 = framework::EigenVector::Flatten(input2.value()); - e_out.device(*context.eigen_device()) = e_in1 + e_in2; - return out; - } -}; - -template -struct Mul { - // multiply two SelectedRows - framework::SelectedRows operator()(const DeviceContext& context, - const framework::SelectedRows& input1, - const framework::SelectedRows& input2) { - framework::SelectedRows out; - out.set_rows(input1.rows()); - out.set_height(input1.height()); - out.mutable_value()->mutable_data(input1.value().dims(), - context.GetPlace()); - auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); - auto e_in1 = framework::EigenVector::Flatten(input1.value()); - auto e_in2 = framework::EigenVector::Flatten(input2.value()); - e_out.device(*context.eigen_device()) = e_in1 * e_in2; - return out; - } - // multiply scalar to SelectedRows - framework::SelectedRows operator()(const DeviceContext& context, - const framework::SelectedRows& input1, - const T input2) { - framework::SelectedRows out; - out.set_rows(input1.rows()); - out.set_height(input1.height()); - out.mutable_value()->mutable_data(input1.value().dims(), - context.GetPlace()); - auto e_out = 
framework::EigenVector::Flatten(*(out.mutable_value())); - auto e_in1 = framework::EigenVector::Flatten(input1.value()); - e_out.device(*context.eigen_device()) = input2 * e_in1; - return out; - } -}; - enum class ScatterOps { ASSIGN, ADD, SUB, SUBBY, MUL, DIV, DIVBY }; // out = seleted_rows_in / tensor diff --git a/paddle/fluid/operators/scale_op.h b/paddle/fluid/operators/scale_op.h index d8a199bc2b860515645b4954b49d8eb59fbd02dc..96b8b00b429df72569ef2a292c8a600c56159f19 100644 --- a/paddle/fluid/operators/scale_op.h +++ b/paddle/fluid/operators/scale_op.h @@ -24,19 +24,13 @@ class ScaleKernel : public framework::OpKernel { public: virtual void Compute(const framework::ExecutionContext& ctx) const { auto* in_var = ctx.InputVar("X"); - auto* in = ctx.Input("X"); - - auto* out_var = ctx.OutputVar("Out"); - auto* out = ctx.Output("Out"); - out->mutable_data(in->place()); - - PADDLE_ENFORCE_EQ(in->dims(), out->dims(), - "in and out should have the same dim"); + auto* in = framework::GetLoDTensorOrSelectedRowsValueFromVar(*in_var); auto scale = static_cast(ctx.Attr("scale")); auto bias = static_cast(ctx.Attr("bias")); auto bias_after_scale = ctx.Attr("bias_after_scale"); + auto* out_var = ctx.OutputVar("Out"); if (in_var->IsType() && in_var != out_var) { auto& in_slr = in_var->Get(); auto* out_slr = out_var->GetMutable(); @@ -44,6 +38,13 @@ class ScaleKernel : public framework::OpKernel { out_slr->set_height(in_slr.height()); } + auto* out = + framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(out_var); + out->mutable_data(in->place()); + + PADDLE_ENFORCE_EQ(in->dims(), out->dims(), + "in and out should have the same dim"); + auto eigen_out = framework::EigenVector::Flatten(*out); auto eigen_in = framework::EigenVector::Flatten(*in); auto& dev = *ctx.template device_context().eigen_device(); diff --git a/paddle/fluid/operators/space_to_depth_op.cc b/paddle/fluid/operators/space_to_depth_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..f109dd685c87ab1b0776a855bb5f510eab1f5526 --- /dev/null +++ b/paddle/fluid/operators/space_to_depth_op.cc @@ -0,0 +1,131 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/space_to_depth_op.h" +#include <string> +#include <vector> + +namespace paddle { +namespace operators { + +class SpaceToDepthOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of SpaceToDepthOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of SpaceToDepthOp should not be null."); + + auto x_dims = ctx->GetInputDim("X"); + PADDLE_ENFORCE_EQ(x_dims.size(), 4, "input should be a 4D tensor"); + auto blocksize = ctx->Attrs().Get<int64_t>("blocksize"); + + PADDLE_ENFORCE_GT(blocksize, 1, "The blocksize should be greater than 1"); + PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be greater than 0"); + PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be greater than 0"); + PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be greater than 0"); + + PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0, + "input channel should be divisible by the square of " + "SpaceToDepthOp blocksize"); + PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0, + "input Height should be divisible by " + "SpaceToDepthOp blocksize"); + PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0, + "input Width should be divisible by " + "SpaceToDepthOp blocksize"); + + VLOG(3) << "SpaceToDepthOp operator x.shape=" << x_dims + << " Attribute blocksize=" << blocksize << std::endl; + + std::vector<int64_t> output_shape(4, 0); // [B,C,H,W] + output_shape[0] = x_dims[0]; + output_shape[1] = x_dims[1] * blocksize * blocksize; + output_shape[2] = x_dims[2] / blocksize; + output_shape[3] = x_dims[3] / blocksize; + + auto out_dims = framework::make_ddim(output_shape); + + ctx->SetOutputDim("Out", out_dims); + + if (x_dims[0] == out_dims[0]) { + // Only pass LoD when the first dimension of output and Input(X) + // are the same. + ctx->ShareLoD("X", /*->*/ "Out"); + } + } +}; + +class SpaceToDepthOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "(Tensor). The input should be a 4D tensor B * C * H * W of " + "SpaceToDepthOp " + "operator."); + AddOutput("Out", + "(Tensor). The output should be a 4D tensor B * C2 * H2 * W2 of " + "SpaceToDepthOp operator."); + AddAttr<int64_t>( + "blocksize", + "(int64_t, default 2) blocksize used to change Space To Depth.") + .SetDefault(2) + .GreaterThan(1); + AddComment(R"DOC( + reorg operator used in Yolo v2. + The output shape is computed as: C2 = C1 * blocksize * blocksize, H2 = H1 / blocksize, W2 = W1 / blocksize. + + Reshape Input(X) into the output shape according to Attr(blocksize). The + data in Input(X) are unchanged. + + Examples: + + 1. Given a 4-D tensor Input(X) with a shape [128, 2048, 26, 26], and the blocksize is 2, the reorg operator will transform Input(X) + into a 4-D tensor with shape [128, 8192, 13, 13], leaving Input(X)'s data unchanged.
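A loop-based NumPy sketch (illustrative only; the helper name space_to_depth_ref is not part of the patch) that mirrors the flat-index arithmetic of space_to_depth_compute defined in space_to_depth_op.h further below and reproduces the shape relation documented above:

import numpy as np

def space_to_depth_ref(x, blocksize):
    # x has layout [B, C, H, W]; mirrors space_to_depth_compute (forward path).
    b, c, h, w = x.shape
    assert c % (blocksize * blocksize) == 0
    assert h % blocksize == 0 and w % blocksize == 0
    out_c = c // (blocksize * blocksize)
    src = x.reshape(-1)
    dst = np.empty_like(src)
    for in_index in range(src.size):
        bb = in_index // (c * h * w)
        k = (in_index % (c * h * w)) // (h * w)
        j = (in_index % (h * w)) // w
        i = in_index % w
        c2 = k % out_c
        offset = k // out_c
        w2 = i * blocksize + offset % blocksize
        h2 = j * blocksize + offset // blocksize
        out_index = w2 + w * blocksize * (h2 + h * blocksize * (c2 + out_c * bb))
        dst[out_index] = src[in_index]
    # InferShape declares the output as [B, C * blocksize^2, H / blocksize, W / blocksize].
    return dst.reshape(b, c * blocksize * blocksize, h // blocksize, w // blocksize)

# space_to_depth_ref(np.zeros((128, 2048, 26, 26), 'float32'), 2).shape == (128, 8192, 13, 13)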
+ + )DOC"); + } +}; + +class SpaceToDepthGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(space_to_depth, ops::SpaceToDepthOp, ops::SpaceToDepthOpMaker, + paddle::framework::DefaultGradOpDescMaker); +REGISTER_OPERATOR(space_to_depth_grad, ops::SpaceToDepthGradOp); +REGISTER_OP_CPU_KERNEL( + space_to_depth, + ops::SpaceToDepthKernel, + ops::SpaceToDepthKernel, + ops::SpaceToDepthKernel); +REGISTER_OP_CPU_KERNEL( + space_to_depth_grad, + ops::SpaceToDepthGradKernel, + ops::SpaceToDepthGradKernel, + ops::SpaceToDepthGradKernel); diff --git a/paddle/fluid/operators/space_to_depth_op.cu b/paddle/fluid/operators/space_to_depth_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..38d0a662733222386b8ecd68d064f3d1abe56c3b --- /dev/null +++ b/paddle/fluid/operators/space_to_depth_op.cu @@ -0,0 +1,30 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/space_to_depth_op.h" + +namespace plat = paddle::platform; +namespace ops = paddle::operators; + +REGISTER_OP_CUDA_KERNEL( + space_to_depth, + ops::SpaceToDepthKernel, + ops::SpaceToDepthKernel, + ops::SpaceToDepthKernel); + +REGISTER_OP_CUDA_KERNEL( + space_to_depth_grad, + ops::SpaceToDepthGradKernel, + ops::SpaceToDepthGradKernel, + ops::SpaceToDepthGradKernel); diff --git a/paddle/fluid/operators/space_to_depth_op.h b/paddle/fluid/operators/space_to_depth_op.h new file mode 100644 index 0000000000000000000000000000000000000000..a71662b4813ab27b65f5c7a918e2bb6fb15a1993 --- /dev/null +++ b/paddle/fluid/operators/space_to_depth_op.h @@ -0,0 +1,127 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ +#ifndef PADDLE_FLUID_OPERATORS_SPACE_TO_DEPTH_OP_H_ +#define PADDLE_FLUID_OPERATORS_SPACE_TO_DEPTH_OP_H_ +#endif // PADDLE_FLUID_OPERATORS_SPACE_TO_DEPTH_OP_H_ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/for_range.h" + +namespace paddle { +namespace operators { + +template +class space_to_depth_compute { + public: + HOSTDEVICE space_to_depth_compute(const T *x, int64_t w, int64_t h, int64_t c, + int64_t batch, int64_t blocksize, + int64_t forward, T *out) + : x_(x), + w_(w), + h_(h), + c_(c), + batch_(batch), + blocksize_(blocksize), + forward_(forward), + out_(out) {} + + HOSTDEVICE void operator()(int64_t in_index) { + int64_t out_c = c_ / (blocksize_ * blocksize_); + // calculate each dim position with index of tensor + int64_t b = in_index / (c_ * h_ * w_); + int64_t k = (in_index % (c_ * h_ * w_)) / (h_ * w_); + int64_t j = ((in_index % (c_ * h_ * w_)) % (h_ * w_)) / w_; + int64_t i = ((in_index % (c_ * h_ * w_)) % (h_ * w_)) % w_; + + int64_t c2 = k % out_c; + int64_t offset = k / out_c; + int64_t w2 = i * blocksize_ + offset % blocksize_; + int64_t h2 = j * blocksize_ + offset / blocksize_; + int64_t out_index = + w2 + w_ * blocksize_ * (h2 + h_ * blocksize_ * (c2 + out_c * b)); + if (forward_) + out_[out_index] = x_[in_index]; + else + out_[in_index] = x_[out_index]; + } + + private: + const T *x_; + int64_t w_, h_, c_, batch_, blocksize_, forward_; + T *out_; +}; + +template +class SpaceToDepthKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto *out = context.Output("Out"); + auto *x = context.Input("X"); + auto blocksize = context.Attr("blocksize"); + auto in_dims = x->dims(); + out->mutable_data(context.GetPlace(), x->type()); + + auto out_dims = out->dims(); + auto B = in_dims[0]; + auto C = in_dims[1]; + auto H = in_dims[2]; + auto W = in_dims[3]; + platform::ForRange for_range( + context.template device_context(), + static_cast(x->numel())); + + auto *x_data = x->data(); + auto *out_data = out->data(); + paddle::operators::space_to_depth_compute computer( + x_data, W, H, C, B, blocksize, 1, out_data); + for_range(computer); + + out->Resize(out_dims); + } +}; + +template +class SpaceToDepthGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto *d_out = + context.Input(framework::GradVarName("Out")); + auto *d_x = + context.Output(framework::GradVarName("X")); + auto blocksize = context.Attr("blocksize"); + auto in_dims = d_x->dims(); + d_x->mutable_data(context.GetPlace(), d_out->type()); + + auto B = in_dims[0]; + auto C = in_dims[1]; + auto H = in_dims[2]; + auto W = in_dims[3]; + + platform::ForRange for_range( + context.template device_context(), + static_cast(d_x->numel())); + + auto *dx_data = d_x->data(); + auto *dout_data = d_out->data(); + + paddle::operators::space_to_depth_compute computer( + dout_data, W, H, C, B, blocksize, 0, dx_data); + for_range(computer); + + d_x->Resize(in_dims); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/split_ids_op.cc b/paddle/fluid/operators/split_ids_op.cc index 243f81e296fb95a2c7e9f717950b8a958ad98852..01d432e13068f7b718d08dc15d8cc99a7fbb0afe 100644 --- a/paddle/fluid/operators/split_ids_op.cc +++ b/paddle/fluid/operators/split_ids_op.cc @@ -64,8 +64,7 @@ class SplitIdsOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const 
framework::ExecutionContext &ctx) const override { return framework::OpKernelType( - framework::ToDataType( - ctx.MultiInput("Ids").front()->type()), + framework::GetDataTypeOfVar(ctx.MultiInputVar("Ids").front()), ctx.GetPlace()); } }; diff --git a/paddle/fluid/operators/split_ids_op.h b/paddle/fluid/operators/split_ids_op.h index 69ac6c5a6b9a8b318520eb9a3ff89a3a6be48339..c8b0e7152106db6fa822a7b17cdb6888e75e310b 100644 --- a/paddle/fluid/operators/split_ids_op.h +++ b/paddle/fluid/operators/split_ids_op.h @@ -113,6 +113,10 @@ class SplitIdsOpKernel : public framework::OpKernel { row_width * sizeof(T)); } } + } else { + PADDLE_THROW( + "% should be LoDTensor or SelectedRows, but the received type is %s", + ctx.Inputs("Ids")[0], ids_var->Type().name()); } } }; diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index d19ac9839c90a116265b761e3b1b3f855e2d95e8..7df14158f3429e25fa972a51ef2615cf569e9a73 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -85,8 +85,8 @@ class SumOp : public framework::OperatorWithKernel { for (size_t idx = 0; idx < x_vars.size(); ++idx) { PADDLE_ENFORCE(x_vars[idx] != nullptr, "Input var[%s] should not be nullptr", x_vars_name[idx]); - // FIXME(zcd): The input x_var may be SelectedRows or LoDTensor. - auto tensor = framework::GetTensorFromVar(*x_vars[idx]); + auto tensor = + framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_vars[idx]); if (tensor->numel() == 0) { continue; } diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc index 53a1cdad0cca72fa0835dae3bdd915d6ce2a87e8..fadd6ba91f507f9d31e0b0964c41b792f7372214 100644 --- a/paddle/fluid/platform/init.cc +++ b/paddle/fluid/platform/init.cc @@ -119,6 +119,7 @@ void InitDevices(bool init_p2p, const std::vector devices) { platform::SetNumThreads(FLAGS_paddle_num_threads); #endif +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OSX__) if (platform::jit::MayIUse(platform::jit::avx)) { #ifndef __AVX__ LOG(WARNING) << "AVX is available, Please re-compile on local machine"; @@ -160,8 +161,9 @@ void InitDevices(bool init_p2p, const std::vector devices) { AVX_GUIDE(AVX, NonAVX); } #endif - #undef AVX_GUIDE + +#endif } void InitGLOG(const std::string &prog_name) { diff --git a/paddle/fluid/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc index 1f61a0e289f32196ead04d71d07b513cbe4655b1..06d8b65fb1480d9f621ca937c1d66ab7e910f010 100644 --- a/paddle/fluid/pybind/const_value.cc +++ b/paddle/fluid/pybind/const_value.cc @@ -27,6 +27,7 @@ void BindConstValue(pybind11::module* m) { m->def("kZeroVarSuffix", [] { return framework::kZeroVarSuffix; }); m->def("kControlDepVarName", [] { return framework::ir::Node::kControlDepVarName; }); + m->def("kNewGradSuffix", [] { return framework::kNewGradSuffix; }); auto op_proto_and_checker_maker = m->def_submodule("op_proto_and_checker_maker"); diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index fc821e04a0baf9278295da18ee5a69afcf2c4605..238cc19189cfd74afa38bdcb5f5c802f9521dfea 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -742,7 +742,12 @@ All parameter, weight, gradient are variables in Paddle. will clean up the temp variables at the end of the current iteration. 2. In some NLP model, it may cause the GPU memory is insufficient, in this case, you should reduce `num_iteration_per_drop_scope`. 
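The _dry_run property added to ExecutionStrategy just below is exercised later in this patch by test_parallel_executor_dry_run.py; a minimal usage sketch along the same lines (loss and main_prog are assumed to come from an already-built network):

import paddle.fluid as fluid

exec_strategy = fluid.ExecutionStrategy()
exec_strategy._dry_run = True  # presumably skips the real op computation, hence "dry run"
pe = fluid.ParallelExecutor(use_cuda=False,
                            loss_name=loss.name,      # assumes a built network
                            main_program=main_prog,   # assumes its Program
                            exec_strategy=exec_strategy)
pe.run([])  # fetch nothing; useful for measuring executor scheduling overhead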
- )DOC"); + )DOC") + .def_property("_dry_run", + [](const ExecutionStrategy &self) { return self.dry_run_; }, + [](ExecutionStrategy &self, bool dry_run) { + self.dry_run_ = dry_run; + }); exec_strategy.def_property( "use_experimental_executor", diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index d7676f89ab5e781f910f98d03e72d5f7c1023a9a..2f5fef36c423736666695c07ebf69d812c3488ed 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -367,7 +367,12 @@ function run_test() { Running unit tests ... ======================================== EOF - ctest --output-on-failure + if [ ${TESTING_DEBUG_MODE:-OFF} == "ON" ] ; then + ctest -V + else + ctest --output-on-failure + fi + # make install should also be test when unittest make install -j `nproc` pip install ${INSTALL_PREFIX:-/paddle/build}/opt/paddle/share/wheels/*.whl diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index 80b50022dd1ac5ec739029f6cfff3f7f170ada00..1ab48c00548b58f4b3e411d8e46e8cf496d6b891 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -30,7 +30,8 @@ from ..unique_name import generate as unique_name __all__ = [ 'data', 'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer', - 'random_data_generator', 'py_reader', 'Preprocessor', 'load' + 'random_data_generator', 'py_reader', 'create_py_reader_by_data', + 'Preprocessor', 'load' ] @@ -60,7 +61,7 @@ def data(name, For example if shape=[1], the resulting shape is [-1, 1]. 2. If shape contains -1, such as shape=[1, -1], append_batch_size will be enforced to be be False (ineffective). - dtype(int|float): The type of data : float32, float_16, int etc + dtype(basestring): The type of data : float32, float_16, int etc type(VarType): The output type. By default it is LOD_TENSOR. lod_level(int): The LoD Level. 0 means the input data is not a sequence. stop_gradient(bool): A boolean that mentions whether gradient should flow. 
@@ -475,6 +476,159 @@ def random_data_generator(low, high, shapes, lod_levels, for_parallel=True): return monkey_patch_reader_methods(main_prog_var) +def _py_reader(capacity, + shapes, + dtypes, + lod_levels=None, + name=None, + use_double_buffer=True, + feed_list=None): + + if feed_list is not None: + if not isinstance(feed_list, list): + raise TypeError("feed_list should be a list of Variable" + " instead of " + str(type(feed_list))) + lod_levels = [] + dtypes = [] + shape_concat = [] + ranks = [] + shapes = [] + + for feed_data in feed_list: + dtypes.append(feed_data.dtype) + shape_concat.extend(feed_data.shape) + ranks.append(len(feed_data.shape)) + shapes.append(feed_data.shape) + lod_levels.append(feed_data.lod_level) + else: + dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] + shape_concat = [] + ranks = [] + + for shape in shapes: + shape_concat.extend(shape) + ranks.append(len(shape)) + + if lod_levels is None: + lod_levels = [0] * len(shapes) + + if name is None: + queue_name = unique_name('lod_tensor_blocking_queue') + reader_name = unique_name('create_py_reader') + double_buffer_name = unique_name('double_buffer') + else: + queue_name = "_".join([name, "queue"]) + reader_name = "_".join([name, "reader"]) + double_buffer_name = "_".join([name, "double_buffer"]) + + var = global_scope().var(queue_name) + feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, shapes) + + startup_blk = default_startup_program().current_block() + startup_var = startup_blk.create_var(name=reader_name) + startup_blk.append_op( + type='create_py_reader', + inputs={'blocking_queue': [queue_name]}, + outputs={'Out': [startup_var]}, + attrs={ + 'shape_concat': shape_concat, + 'lod_levels': lod_levels, + 'ranks': ranks + }) + + startup_var.desc.set_dtypes(dtypes) + startup_var.persistable = True + + main_prog_var = _copy_reader_var_(default_main_program().current_block(), + startup_var) + + reader = monkey_patch_reader_methods(main_prog_var) + if use_double_buffer: + double_buffer_reader = double_buffer(reader, name=double_buffer_name) + # we return a double buffer reader. However, the reset method comes from + # py_reader. 
+ double_buffer_reader.reset = reader.reset + reader = double_buffer_reader + + # monkey patch py_reader special methods + reader.queue = feed_queue + current_reset_method = reader.reset + reader.thread = None + reader.tensor_provider = None + reader.exited = False + + def start_provide_thread(func): + def __provider_thread__(): + for tensors in func(): + array = core.LoDTensorArray() + for item in tensors: + if not isinstance(item, core.LoDTensor): + tmp = core.LoDTensor() + tmp.set(item, core.CPUPlace()) + item = tmp + + array.append(item) + + if reader.exited: + break + feed_queue.push(array) + if reader.exited: + break + feed_queue.close() + + reader.thread = threading.Thread(target=__provider_thread__) + reader.thread.daemon = True + reader.thread.start() + + def __set_tensor_provider__(func): + reader.tensor_provider = func + + def __set_paddle_reader__(paddle_reader): + with program_guard(Program(), Program()): + actual_feed_list = feed_list + if actual_feed_list is None: + actual_feed_list = [] + counter = 0 + for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels): + name = str(counter) + actual_feed_list.append( + data( + name=name, + dtype=dtype, + shape=shape, + lod_level=lod_level)) + counter += 1 + + data_names = [feed_data.name for feed_data in actual_feed_list] + feeder = DataFeeder( + feed_list=actual_feed_list, place=core.CPUPlace()) + paddle_reader = feeder.decorate_reader( + paddle_reader, multi_devices=False) + + def __tensor_provider__(): + for slots in paddle_reader(): + yield [slots[data_name] for data_name in data_names] + + __set_tensor_provider__(__tensor_provider__) + + def __reset__(): + current_reset_method() + if reader.thread is not None and reader.tensor_provider is not None: + reader.exited = True + reader.thread.join() + reader.exited = False + + def __start__(): + start_provide_thread(reader.tensor_provider) + + reader.reset = __reset__ + reader.decorate_tensor_provider = __set_tensor_provider__ + reader.decorate_paddle_reader = __set_paddle_reader__ + reader.start = __start__ + + return reader + + def py_reader(capacity, shapes, dtypes, @@ -599,128 +753,72 @@ def py_reader(capacity, >>> except fluid.core.EOFException: >>> test_reader.reset() """ - dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] - shape_concat = [] - ranks = [] - - for shape in shapes: - shape_concat.extend(shape) - ranks.append(len(shape)) - - if lod_levels is None: - lod_levels = [0] * len(shapes) - - if name is None: - queue_name = unique_name('lod_tensor_blocking_queue') - reader_name = unique_name('create_py_reader') - double_buffer_name = unique_name('double_buffer') - else: - queue_name = "_".join([name, "queue"]) - reader_name = "_".join([name, "reader"]) - double_buffer_name = "_".join([name, "double_buffer"]) - - var = global_scope().var(queue_name) - feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, shapes) - - startup_blk = default_startup_program().current_block() - startup_var = startup_blk.create_var(name=reader_name) - startup_blk.append_op( - type='create_py_reader', - inputs={'blocking_queue': [queue_name]}, - outputs={'Out': [startup_var]}, - attrs={ - 'shape_concat': shape_concat, - 'lod_levels': lod_levels, - 'ranks': ranks - }) - - startup_var.desc.set_dtypes(dtypes) - startup_var.persistable = True - - main_prog_var = _copy_reader_var_(default_main_program().current_block(), - startup_var) - - reader = monkey_patch_reader_methods(main_prog_var) - if use_double_buffer: - double_buffer_reader = double_buffer(reader, 
name=double_buffer_name) - # we return a double buffer reader. However, the reset method comes from - # py_reader. - double_buffer_reader.reset = reader.reset - reader = double_buffer_reader - - # monkey patch py_reader special methods - reader.queue = feed_queue - current_reset_method = reader.reset - reader.thread = None - reader.tensor_provider = None - reader.exited = False - - def start_provide_thread(func): - def __provider_thread__(): - for tensors in func(): - array = core.LoDTensorArray() - for item in tensors: - if not isinstance(item, core.LoDTensor): - tmp = core.LoDTensor() - tmp.set(item, core.CPUPlace()) - item = tmp - - array.append(item) - - if reader.exited: - break - feed_queue.push(array) - if reader.exited: - break - feed_queue.close() + return _py_reader( + capacity=capacity, + shapes=shapes, + dtypes=dtypes, + lod_levels=lod_levels, + name=name, + use_double_buffer=use_double_buffer) - reader.thread = threading.Thread(target=__provider_thread__) - reader.thread.daemon = True - reader.thread.start() - def __set_tensor_provider__(func): - reader.tensor_provider = func +def create_py_reader_by_data(capacity, + feed_list, + name=None, + use_double_buffer=True): + """ + Create a Python reader for data feeding in Python - def __set_paddle_reader__(paddle_reader): - with program_guard(Program(), Program()): - feed_list = [] - counter = 0 - for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels): - name = str(counter) - feed_list.append( - data( - name=name, - dtype=dtype, - shape=shape, - lod_level=lod_level)) - counter += 1 - - feeder = DataFeeder(feed_list=feed_list, place=core.CPUPlace()) - paddle_reader = feeder.decorate_reader( - paddle_reader, multi_devices=False) + This layer returns a Reader Variable. - def __tensor_provider__(): - for slots in paddle_reader(): - yield [slots[str(idx)] for idx in six.moves.xrange(counter)] + Works much like py_reader except that it's input is feed_list + instead of shapes, dtypes and lod_levels - __set_tensor_provider__(__tensor_provider__) + Args: + capacity(int): The buffer capacity maintained by :code:`py_reader`. + feed_list(list(Variable)): The data feed list. + name(basestring): The prefix Python queue name and Reader name. None will + be generated automatically. + use_double_buffer(bool): Whether use double buffer or not. - def __reset__(): - current_reset_method() - if reader.thread is not None and reader.tensor_provider is not None: - reader.exited = True - reader.thread.join() - reader.exited = False + Returns: + Variable: A Reader from which we can get feeding data. - def __start__(): - start_provide_thread(reader.tensor_provider) + Examples: - reader.reset = __reset__ - reader.decorate_tensor_provider = __set_tensor_provider__ - reader.decorate_paddle_reader = __set_paddle_reader__ - reader.start = __start__ + 1. 
The basic usage of :code:`create_py_reader_by_data` is as follows: - return reader + >>> import paddle.fluid as fluid + >>> import paddle.dataset.mnist as mnist + >>> + >>> image = fluid.layers.data(name='image', shape=[3,224,224], dtype='float32') + >>> label = fluid.layers.data(name='label', shape=[1], dtype='int64') + >>> reader = fluid.layers.create_py_reader_by_data(capacity=64, feed_list=[image, label]) + >>> reader.decorate_paddle_reader( + >>> paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), + >>> buf_size=500)) + >>> + >>> img, label = fluid.layers.read_file(reader) + >>> loss = network(img, label) # some network definition + >>> + >>> fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) + >>> + >>> exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) + >>> for epoch_id in range(10): + >>> reader.start() + >>> try: + >>> while True: + >>> exe.run(fetch_list=[loss.name]) + >>> except fluid.core.EOFException: + >>> reader.reset() + """ + return _py_reader( + capacity=capacity, + shapes=None, + dtypes=None, + lod_levels=None, + name=name, + use_double_buffer=use_double_buffer, + feed_list=feed_list) def open_files(filenames, diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index a87f123117491f27c7f024a758200e3a8e41fbc2..b0a8efd5edcf4de7053438d63ba0315997d4e280 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -154,6 +154,7 @@ __all__ = [ 'mul', 'sigmoid_cross_entropy_with_logits', 'maxout', + 'space_to_depth', 'affine_grid', 'sequence_reverse', 'affine_channel', @@ -3060,7 +3061,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None): x = fluid.layers.data(name='y', shape=[10, 5], dtype='float32', lod_level=1) pad_value = fluid.layers.assign( - input=numpy.array([0], dtype=numpy.float32)) + input=numpy.array([0.0], dtype=numpy.float32)) out = fluid.layers.sequence_pad(x=x, pad_value=pad_value) """ @@ -7674,6 +7675,66 @@ def maxout(x, groups, name=None): return out + +def space_to_depth(x, blocksize, name=None): + """ + Reorganizes the input LoDTensor, with layout [batch, channel, height, width], according to blocksize. + + This op rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the + input LoDTensor where values from the height and width dimensions are moved to the channel dimension. + The attr blocksize indicates the input block size. + + space_to_depth will reorganize the elements of the input with shape [batch, channel, height, width] according + to blocksize to construct an output with shape [batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]: + + This operation is useful for resizing the activations between convolutions + (but keeping all data). + + - Non-overlapping blocks of size blocksize x blocksize are rearranged into depth at each location. + - The depth of the output tensor is blocksize * blocksize * input channel. + - The Y, X coordinates within each block of the input become the high order component of the output channel index. + - channel should be divisible by the square of blocksize. + - height and width should be divisible by blocksize. + + + Args: + x(Variable): The input LoDTensor. + blocksize(int): The blocksize used to reorganize each feature map; it should be greater than 1. + + Returns: + Variable: The output LoDTensor. + + Raises: + ValueError: If blocksize is not a Python int. + + Examples: + ..
code-block:: python + + data = fluid.layers.data( + name='data', shape=[1, 4, 2, 2], dtype='float32') + space_to_depthed = fluid.layers.space_to_depth( + x=data, blocksize=2) + """ + + helper = LayerHelper("space_to_depth", **locals()) + + if not (isinstance(blocksize, int)): + raise ValueError("blocksize must be a python Int") + + if name is None: + out = helper.create_variable_for_type_inference( + dtype=x.dtype) #fix create + else: + out = helper.create_variable( + name=name, dtype=x.dtype, persistable=False) + + helper.append_op( + type="space_to_depth", + inputs={"X": x}, + attrs={"blocksize": blocksize}, + outputs={"Out": out}) + return out + + @templatedoc() def sequence_reverse(x, name=None): """ diff --git a/python/paddle/fluid/op.py b/python/paddle/fluid/op.py index 4e1d1450dea85fe4eb3e68713250836e4beac992..b8bb3db1eedcf25c9b6a02ad3b4f261e8be8efce 100644 --- a/python/paddle/fluid/op.py +++ b/python/paddle/fluid/op.py @@ -108,6 +108,8 @@ class OpDescCreationMethod(object): new_attr.i = user_defined_attr elif attr.type == framework_pb2.FLOAT: new_attr.f = user_defined_attr + elif attr.type == framework_pb2.LONG: + new_attr.l = user_defined_attr elif attr.type == framework_pb2.STRING: new_attr.s = user_defined_attr elif attr.type == framework_pb2.BOOLEAN: diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py index 57185da4d1d38f3848994aae105411cf2844843a..d8aace9fdfa601413bb4d4b1b2a309ba6a8e4ece 100644 --- a/python/paddle/fluid/regularizer.py +++ b/python/paddle/fluid/regularizer.py @@ -61,14 +61,25 @@ def append_regularization_ops(parameters_and_grads, regularization=None): params_and_grads.append((param, grad)) continue - assert grad.shape == regularization_term.shape + new_grad = grad + if grad.type == core.VarDesc.VarType.SELECTED_ROWS: + # FIXME(zcd): If the grad is SELECTED_ROWS, after regularization, + # the grad's type and name will be changed. But the gradient's name + # is used in ParallelExecutor Reduce mode, so I add a flag for + # the new_grad here. 
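Since the rewritten gradient created just below is a new dense variable whose name carries the suffix exposed to Python as core.kNewGradSuffix() (bound in the pybind const_value change earlier in this patch), downstream code could recognize such gradients; a hypothetical check, not part of the patch:

import paddle.fluid.core as core

def is_regularized_new_grad(var_name):
    # hypothetical helper: gradients rewritten by append_regularization_ops for
    # SelectedRows grads get a new dense var named grad.name + core.kNewGradSuffix()
    return var_name.endswith(core.kNewGradSuffix())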
+ new_grad = grad.block.create_var( + name=grad.name + core.kNewGradSuffix(), + dtype=param.dtype, + shape=param.shape, + lod_level=param.lod_level, + type=core.VarDesc.VarType.LOD_TENSOR) grad.block.append_op( - type='elementwise_add', - inputs={"X": grad, - "Y": regularization_term}, - outputs={"Out": grad}) - params_and_grads.append((param, grad)) + type='sum', + inputs={"X": [grad, regularization_term]}, + outputs={"Out": new_grad}) + + params_and_grads.append((param, new_grad)) return params_and_grads @@ -142,26 +153,7 @@ class L2DecayRegularizer(WeightDecayRegularizer): assert isinstance(block, framework.Block) decay = block.create_var( - dtype="float32", shape=param.shape, lod_level=param.lod_level) - - if grad.type == core.VarDesc.VarType.SELECTED_ROWS: - idx = block.create_var( - dtype="int64", - shape=param.shape, - type=core.VarDesc.VarType.LOD_TENSOR) - decay = block.create_var( - dtype="float32", - shape=param.shape, - type=core.VarDesc.VarType.LOD_TENSOR) - block.append_op( - type='extract_rows', inputs={'X': grad}, outputs={'Out': idx}) - block.append_op( - type='lookup_table', - inputs={'W': param, - 'Ids': idx}, - outputs={'Out': decay}, - attrs={'is_sparse': True}) - param = decay + dtype=param.dtype, shape=param.shape, lod_level=param.lod_level) # Append Op to calculate decay block.append_op( @@ -218,27 +210,9 @@ class L1DecayRegularizer(WeightDecayRegularizer): """ assert isinstance(param, framework.Parameter) assert isinstance(block, framework.Block) + decay = block.create_var( - dtype="float32", shape=param.shape, lod_level=param.lod_level) - - if grad.type == core.VarDesc.VarType.SELECTED_ROWS: - idx = block.create_var( - dtype="int64", - shape=param.shape, - type=core.VarDesc.VarType.LOD_TENSOR) - decay = block.create_var( - dtype="float32", - shape=param.shape, - type=core.VarDesc.VarType.LOD_TENSOR) - block.append_op( - type='extract_rows', inputs={'X': grad}, outputs={'Out': idx}) - block.append_op( - type='lookup_table', - inputs={'W': param, - 'Ids': idx}, - outputs={'Out': decay}, - attrs={'is_sparse': True}) - param = decay + dtype=param.dtype, shape=param.shape, lod_level=param.lod_level) # Append sign op block.append_op( diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index 3c3167c320e03372428c42ffe6f22a8f26ab509d..b3aac483d5e38e10705ab371087b3f291e0a5413 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -225,29 +225,29 @@ class TestWithInput1x1Filter1x1(TestConv2dOp): #----------------Conv2dCUDNN---------------- -def create_test_cudnn_class(parent, cls_name): +def create_test_cudnn_class(parent): @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestCUDNNCase(parent): def init_kernel_type(self): self.use_cudnn = True - cls_name = "{0}".format(cls_name) + cls_name = "{0}_{1}".format(parent.__name__, "CUDNN") TestCUDNNCase.__name__ = cls_name globals()[cls_name] = TestCUDNNCase -create_test_cudnn_class(TestConv2dOp, "TestPool2DCUDNNOp") -create_test_cudnn_class(TestWithPad, "TestPool2DCUDNNOpCase1") -create_test_cudnn_class(TestWithStride, "TestPool2DCUDNNOpCase2") -create_test_cudnn_class(TestWithGroup, "TestPool2DCUDNNOpCase3") -create_test_cudnn_class(TestWith1x1, "TestPool2DCUDNNOpCase4") -create_test_cudnn_class(TestWithInput1x1Filter1x1, "TestPool2DCUDNNOpCase4") +create_test_cudnn_class(TestConv2dOp) +create_test_cudnn_class(TestWithPad) 
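# Note on the factory refactor above (illustrative, not part of the patch): the removed
# code registered both TestWith1x1 and TestWithInput1x1Filter1x1 under the same name
# "TestPool2DCUDNNOpCase4", so the later globals()[cls_name] assignment silently replaced
# the earlier test class. Deriving the name from parent.__name__ keeps each generated
# class unique, e.g.:
#   "{0}_{1}".format(TestWithStride.__name__, "CUDNN")     -> "TestWithStride_CUDNN"
#   "{0}_{1}".format(TestWithStride.__name__, "CUDNNFp16") -> "TestWithStride_CUDNNFp16"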
+create_test_cudnn_class(TestWithStride) +create_test_cudnn_class(TestWithGroup) +create_test_cudnn_class(TestWith1x1) +create_test_cudnn_class(TestWithInput1x1Filter1x1) #----------------Conv2dCUDNN---------------- -def create_test_cudnn_fp16_class(parent, cls_name, grad_check=True): +def create_test_cudnn_fp16_class(parent, grad_check=True): @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestConv2DCUDNNFp16(parent): @@ -279,23 +279,17 @@ def create_test_cudnn_fp16_class(parent, cls_name, grad_check=True): max_relative_error=0.02, no_grad_set=set(['Input'])) - cls_name = "{0}".format(cls_name) + cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16") TestConv2DCUDNNFp16.__name__ = cls_name globals()[cls_name] = TestConv2DCUDNNFp16 -create_test_cudnn_fp16_class( - TestConv2dOp, "TestPool2DCUDNNFp16Op", grad_check=False) -create_test_cudnn_fp16_class( - TestWithPad, "TestPool2DCUDNNFp16OpCase1", grad_check=False) -create_test_cudnn_fp16_class( - TestWithStride, "TestPool2DCUDNNFp16OpCase2", grad_check=False) -create_test_cudnn_fp16_class( - TestWithGroup, "TestPool2DCUDNNFp16OpCase3", grad_check=False) -create_test_cudnn_fp16_class( - TestWith1x1, "TestPool2DCUDNNFp16OpCase4", grad_check=False) -create_test_cudnn_fp16_class( - TestWithInput1x1Filter1x1, "TestPool2DCUDNNFp16OpCase4", grad_check=False) +create_test_cudnn_fp16_class(TestConv2dOp, grad_check=False) +create_test_cudnn_fp16_class(TestWithPad, grad_check=False) +create_test_cudnn_fp16_class(TestWithStride, grad_check=False) +create_test_cudnn_fp16_class(TestWithGroup, grad_check=False) +create_test_cudnn_fp16_class(TestWith1x1, grad_check=False) +create_test_cudnn_fp16_class(TestWithInput1x1Filter1x1, grad_check=False) # -------TestDepthwiseConv diff --git a/python/paddle/fluid/tests/unittests/test_dist_base.py b/python/paddle/fluid/tests/unittests/test_dist_base.py index 45fae63b01e6a766202408d023e0292579c2857a..4b8a215190a90c974a9ecc8658d044c59b80c989 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_base.py @@ -98,17 +98,18 @@ class TestDistRunnerBase(object): strategy.allow_op_delay = False build_stra = fluid.BuildStrategy() - if args.batch_merge_repeat > 1: - pass_builder = build_stra._create_passes_from_strategy() - mypass = pass_builder.insert_pass( - len(pass_builder.all_passes()) - 2, "multi_batch_merge_pass") - mypass.set_int("num_repeats", args.batch_merge_repeat) if args.use_reduce: build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce else: build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + if args.batch_merge_repeat > 1: + pass_builder = build_stra._create_passes_from_strategy() + mypass = pass_builder.insert_pass( + len(pass_builder.all_passes()) - 2, "multi_batch_merge_pass") + mypass.set_int("num_repeats", args.batch_merge_repeat) + exe = fluid.ParallelExecutor( args.use_cuda, loss_name=avg_cost.name, diff --git a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py index 986fdd9ff27fe2be54ce97f330028b4ae2358714..3a5b6b5cb8ee4f83c26a96e868e7c75933d28c15 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py +++ b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py @@ -373,9 +373,8 @@ class TestL2Decay(TranspilerTest): self.assertEqual(len(pserver.blocks), 3) self.assertEqual([op.type for op in pserver.blocks[1].ops], ["sum", "scale", "clip", "sgd"]) - 
self.assertEqual( - [op.type for op in pserver.blocks[2].ops], - ["sum", "scale", "clip", "scale", "elementwise_add", "sgd"]) + self.assertEqual([op.type for op in pserver.blocks[2].ops], + ["sum", "scale", "clip", "scale", "sum", "sgd"]) # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer @@ -416,12 +415,10 @@ class TestL2DecayWithPiecewise(TranspilerTest): "logical_and", "conditional_block", "fill_constant", "conditional_block" ]) - self.assertEqual( - [op.type for op in pserver.blocks[7].ops], - ["sum", "scale", "scale", "elementwise_add", "momentum"]) - self.assertEqual( - [op.type for op in pserver.blocks[8].ops], - ["sum", "scale", "scale", "elementwise_add", "momentum"]) + self.assertEqual([op.type for op in pserver.blocks[7].ops], + ["sum", "scale", "scale", "sum", "momentum"]) + self.assertEqual([op.type for op in pserver.blocks[8].ops], + ["sum", "scale", "scale", "sum", "momentum"]) class TestEmptyPserverOptimizeBlocks(TranspilerTest): diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py index 6a129b6df9bf1830fdf5eb5cb9ae0c5e4f7bb4ec..53409e436c0739bce63a3a8f90591e0ca6836859 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py @@ -117,56 +117,5 @@ class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): } -class TestElementWiseMulSelectedRows(OpTest): - def setUp(self): - self.rows = [0, 1, 2, 3, 4, 5, 6] - self.feature = 12 - self.height = 100 - self.input_shape = (len(self.rows), self.feature) - - def prepare_input(self, scope, place): - self.input = { - "X": np.random.random(self.input_shape).astype("float32"), - "Y": np.random.random(self.input_shape).astype("float32") - } - - def init_input(in_name): - x_selected_rows = scope.var(in_name).get_selected_rows() - x_selected_rows.set_height(self.height) - x_selected_rows.set_rows(self.rows) - x_array = self.input[in_name] - x_tensor = x_selected_rows.get_tensor() - x_tensor.set(x_array, place) - - init_input("X") - init_input("Y") - - def create_out_selected_row(self, scope): - return scope.var('Out').get_selected_rows() - - def check_result(self, out_selected_rows): - assert out_selected_rows.height() == self.height - assert out_selected_rows.rows() == self.rows - out_tensor = np.array(out_selected_rows.get_tensor()) - assert out_tensor.shape == self.input_shape - - def check_with_place(self, place): - scope = core.Scope() - self.prepare_input(scope, place) - - out_selected_rows = self.create_out_selected_row(scope) - out_selected_rows.set_height(0) - out_selected_rows.set_rows([]) - - elementwise_mul = Operator("elementwise_mul", X='X', Y='Y', Out='Out') - elementwise_mul.run(scope, place) - self.check_result(out_selected_rows) - - def test_elewisemul_with_selected_rows_input(self): - places = [core.CPUPlace()] - for place in places: - self.check_with_place(place) - - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_extract_rows_op.py b/python/paddle/fluid/tests/unittests/test_extract_rows_op.py deleted file mode 100644 index 8629bcf0f2e3c37aefdbf79b203176a43e0c3a7e..0000000000000000000000000000000000000000 --- a/python/paddle/fluid/tests/unittests/test_extract_rows_op.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest -import numpy as np -import paddle.fluid.core as core -from paddle.fluid.op import Operator -from op_test import OpTest - - -class TestExtractRows(OpTest): - def check_with_place(self, place): - scope = core.Scope() - - # create and initialize Variable - feature_len = 12 - rows = [0, 4, 4, 7] - np_array = np.ones((len(rows), feature_len)).astype("float32") - - in_x = scope.var('X').get_selected_rows() - in_x.set_height(len(rows)) - in_x.set_rows(rows) - in_x_tensor = in_x.get_tensor() - in_x_tensor.set(np_array, place) - - # create Out Variable - out_tensor = scope.var('Out').get_tensor() - - # create and run lookup_table operator - extract_rows_op = Operator("extract_rows", X='X', Out='Out') - extract_rows_op.run(scope, place) - - # get result from Out - result_array = np.array(out_tensor) - result_array = [ele[0] for ele in result_array] - assert result_array == rows - - def test_concat_rows(self): - places = [core.CPUPlace()] - if core.is_compiled_with_cuda(): - places.append(core.CUDAPlace(0)) - for place in places: - self.check_with_place(place) - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index c4ecc2c2c2563fcad09821453ee73e41f81407d5..49ba41e6fc908e9713414120bbeb45ca715042c3 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -248,6 +248,17 @@ class TestBook(unittest.TestCase): self.assertIsNotNone(layers.softmax(hid)) print(str(program)) + def test_space_to_depth(self): + program = Program() + with program_guard(program): + data = layers.data( + name='data', + shape=[32, 9, 6, 6], + append_batch_size=False, + dtype='float32') + self.assertIsNotNone(layers.space_to_depth(data, 3)) + print(str(program)) + def test_sequence_unsqueeze(self): program = Program() with program_guard(program): diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py index d6dbedcf875b06f5af4597e6dad71a39f286a471..84b0aad8acb096a32f625e32fb640599f2882d97 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py @@ -16,6 +16,7 @@ from __future__ import print_function import paddle.dataset.conll05 as conll05 import paddle.fluid as fluid +import paddle.fluid.core as core import unittest import paddle import numpy as np @@ -177,32 +178,36 @@ class TestCRFModel(unittest.TestCase): def test_update_sparse_parameter_all_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce - self.check_network_convergence( - is_sparse=True, build_strategy=build_strategy, use_cuda=True) + if core.is_compiled_with_cuda(): + self.check_network_convergence( + is_sparse=True, 
build_strategy=build_strategy, use_cuda=True) self.check_network_convergence( is_sparse=True, build_strategy=build_strategy, use_cuda=False) def test_update_dense_parameter_all_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce - self.check_network_convergence( - is_sparse=False, build_strategy=build_strategy, use_cuda=True) + if core.is_compiled_with_cuda(): + self.check_network_convergence( + is_sparse=False, build_strategy=build_strategy, use_cuda=True) self.check_network_convergence( is_sparse=False, build_strategy=build_strategy, use_cuda=False) def test_update_sparse_parameter_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce - self.check_network_convergence( - is_sparse=True, build_strategy=build_strategy, use_cuda=True) + if core.is_compiled_with_cuda(): + self.check_network_convergence( + is_sparse=True, build_strategy=build_strategy, use_cuda=True) self.check_network_convergence( is_sparse=True, build_strategy=build_strategy, use_cuda=False) def test_update_dense_parameter_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce - self.check_network_convergence( - is_sparse=False, build_strategy=build_strategy, use_cuda=True) + if core.is_compiled_with_cuda(): + self.check_network_convergence( + is_sparse=False, build_strategy=build_strategy, use_cuda=True) self.check_network_convergence( is_sparse=False, build_strategy=build_strategy, use_cuda=False) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py new file mode 100644 index 0000000000000000000000000000000000000000..c93740669f40aee3a6c143d153cfd0f5bb72dbd9 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py @@ -0,0 +1,80 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import paddle.fluid as fluid +import unittest +import logging +import six + + +class TestBase(unittest.TestCase): + def main(self, + network_func, + iter=100, + iter_per_pe=100, + use_gpu=True, + use_experimental_executor=False): + if use_gpu and not fluid.core.is_compiled_with_cuda(): + logging.warning( + "Paddle is not compiled with CUDA, skip GPU unittests") + return + + main_prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.Scope() + with fluid.program_guard(main_prog, startup_prog): + with fluid.scope_guard(scope): + loss = network_func() + fluid.Executor( + fluid.CUDAPlace(0) + if use_gpu else fluid.CPUPlace()).run(startup_prog) + + for _ in six.moves.xrange(iter): + exe_strategy = fluid.ExecutionStrategy() + exe_strategy._dry_run = True + exe_strategy.use_experimental_executor = use_experimental_executor + pe = fluid.ParallelExecutor( + use_cuda=True, + loss_name=loss.name, + main_program=main_prog, + exec_strategy=exe_strategy) + for _ in six.moves.xrange(iter_per_pe): + pe.run([]) + + +class TestMNISTDryRun(TestBase): + def test_mnist_dry_run(self): + for use_gpu in (False, True): + for use_experimental_executor in (False, True): + self.main( + network_func=TestMNISTDryRun.network_func, + use_gpu=use_gpu, + use_experimental_executor=use_experimental_executor) + + @staticmethod + def network_func(): + img = fluid.layers.data(name='img', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + hidden = img + for _ in six.moves.xrange(10): + hidden = fluid.layers.fc(input=img, size=200, act='tanh') + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + fluid.optimizer.Adam().minimize(avg_loss) + return avg_loss + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py index af3745987aa3eae96968bdc6b5c9cd951e9ca6fa..3eecc4670152e72443f731c71d7db67ca8e02e72 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py @@ -14,30 +14,18 @@ from __future__ import print_function -from parallel_executor_test_base import TestParallelExecutorBase -import paddle.fluid as fluid -import paddle.fluid.core as core -import numpy as np -import paddle -import paddle.dataset.mnist as mnist import unittest -import os -MNIST_RECORDIO_FILE = "./mnist_test_pe.recordio" +import numpy as np +import paddle.fluid.core as core +import os +import paddle.fluid as fluid +from parallel_executor_test_base import TestParallelExecutorBase def simple_fc_net(use_feed): - if use_feed: - img = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - else: - reader = fluid.layers.open_files( - filenames=[MNIST_RECORDIO_FILE], - shapes=[[-1, 784], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64']) - reader = fluid.layers.io.double_buffer(reader) - img, label = fluid.layers.read_file(reader) + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') hidden = img for _ in range(4): hidden = fluid.layers.fc( @@ -53,17 +41,8 @@ def simple_fc_net(use_feed): def fc_with_batchnorm(use_feed): - if use_feed: - img = fluid.layers.data(name='image', 
shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - else: - reader = fluid.layers.open_files( - filenames=[MNIST_RECORDIO_FILE], - shapes=[[-1, 784], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64']) - reader = fluid.layers.io.double_buffer(reader) - img, label = fluid.layers.read_file(reader) + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') hidden = img for _ in range(1): @@ -88,19 +67,6 @@ class TestMNIST(TestParallelExecutorBase): @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) - # Convert mnist to recordio file - with fluid.program_guard(fluid.Program(), fluid.Program()): - reader = paddle.batch(mnist.train(), batch_size=4) - feeder = fluid.DataFeeder( - feed_list=[ # order is image and label - fluid.layers.data( - name='image', shape=[784]), - fluid.layers.data( - name='label', shape=[1], dtype='int64'), - ], - place=fluid.CPUPlace()) - fluid.recordio_writer.convert_reader_to_recordio_file( - MNIST_RECORDIO_FILE, reader, feeder) def _init_data(self): np.random.seed(5) @@ -111,10 +77,6 @@ class TestMNIST(TestParallelExecutorBase): def _compare_reduce_and_allreduce(self, model, use_cuda): if use_cuda and not core.is_compiled_with_cuda(): return - self.check_network_convergence( - model, use_cuda=use_cuda, use_reduce=True) - self.check_network_convergence( - model, use_cuda=use_cuda, allow_op_delay=True, use_reduce=True) img, label = self._init_data() @@ -140,9 +102,6 @@ class TestMNIST(TestParallelExecutorBase): def check_simple_fc_convergence(self, use_cuda, use_reduce=False): if use_cuda and not core.is_compiled_with_cuda(): return - self.check_network_convergence(simple_fc_net, use_cuda=use_cuda) - self.check_network_convergence( - simple_fc_net, use_cuda=use_cuda, allow_op_delay=True) img, label = self._init_data() @@ -199,8 +158,6 @@ class TestMNIST(TestParallelExecutorBase): if use_cuda and not core.is_compiled_with_cuda(): return - self.check_network_convergence(fc_with_batchnorm, use_cuda=use_cuda) - img, label = self._init_data() self.check_network_convergence( diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py index b7fad9b3a60632adb564e1d155a3d935706b467f..d94494e219c5f348a08b4c3c2d111674ea6badf3 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py @@ -53,15 +53,24 @@ def simple_fc_net(in_size, hidden_sizes, batch_size, queue_capacity, - use_double_buffer=False): - reader = fluid.layers.py_reader( - capacity=queue_capacity, - shapes=[[-1, in_size], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64'], - use_double_buffer=False) - feed_queue = reader.queue - reader = fluid.layers.batch(reader, batch_size=batch_size) + use_double_buffer=False, + use_feed_list=True): + if use_feed_list: + data = fluid.layers.data(name="data", dtype='float32', shape=[in_size]) + label = fluid.layers.data(name='label', dtype='int64', shape=[1]) + py_reader = fluid.layers.create_py_reader_by_data( + capacity=queue_capacity, + use_double_buffer=False, + feed_list=[data, label]) + else: + py_reader = fluid.layers.py_reader( + capacity=queue_capacity, + shapes=[[-1, in_size], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + use_double_buffer=False) + feed_queue = py_reader.queue + reader = 
fluid.layers.batch(py_reader, batch_size=batch_size) if use_double_buffer: reader = fluid.layers.double_buffer(reader) @@ -83,7 +92,7 @@ def simple_fc_net(in_size, optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) - return in_data, label, loss, optimizer, feed_queue + return in_data, label, loss, optimizer, feed_queue, py_reader class TestPyReaderUsingExecutor(unittest.TestCase): @@ -100,16 +109,22 @@ class TestPyReaderUsingExecutor(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): for use_parallel_executor in [False, True]: for use_double_buffer in [False, True]: - print('Test Parameters:'), - print({ - 'use_cuda': use_cuda, - 'use_parallel_executor': use_parallel_executor, - 'use_double_buffer': use_double_buffer - }) - self.main(use_cuda, use_parallel_executor, - use_double_buffer) - - def random_reader(self): + for use_feed_list in [False, True]: + for use_decorate_paddle_reader in [False, True]: + print('Test Parameters:'), + print({ + 'use_cuda': use_cuda, + 'use_parallel_executor': use_parallel_executor, + 'use_double_buffer': use_double_buffer, + 'use_feed_list': use_feed_list, + 'use_decorate_paddle_reader': + use_decorate_paddle_reader + }) + self.main(use_cuda, use_parallel_executor, + use_double_buffer, use_feed_list, + use_decorate_paddle_reader) + + def tensor_reader(self, use_decorate_paddle_reader): def reader(): self.inputs = [] cnt = 0 @@ -133,34 +148,43 @@ class TestPyReaderUsingExecutor(unittest.TestCase): elif not self.use_double_buffer: break - yield tensors + if use_decorate_paddle_reader: + yield [(in_data, label)] + else: + yield tensors cnt += 1 - yield None + if not use_decorate_paddle_reader: + yield None return reader def main(self, use_cuda=True, use_parallel_executor=False, - use_double_buffer=False): + use_double_buffer=False, + use_feed_list=False, + use_decorate_paddle_reader=False): assert not use_cuda or use_cuda and core.is_compiled_with_cuda() self.use_cuda = use_cuda self.use_parallel_executor = use_parallel_executor self.use_double_buffer = use_double_buffer + self.use_feed_list = use_feed_list + self.use_decorate_paddle_reader = use_decorate_paddle_reader startup_program = fluid.Program() main_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - in_data, label, loss, optimizer, feed_queue = simple_fc_net( + in_data, label, loss, optimizer, feed_queue, py_reader = simple_fc_net( in_size=self.in_size, class_num=self.class_num, hidden_sizes=self.hidden_sizes, batch_size=self.batch_size, queue_capacity=self.queue_capacity, - use_double_buffer=self.use_double_buffer) + use_double_buffer=self.use_double_buffer, + use_feed_list=self.use_feed_list) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -178,10 +202,14 @@ class TestPyReaderUsingExecutor(unittest.TestCase): main_exe = startup_exe self.batch_size_times = 1 - reader = self.random_reader() - thread = threading.Thread( - target=feed_data, args=(feed_queue, reader)) - thread.start() + reader = self.tensor_reader(use_decorate_paddle_reader) + if use_decorate_paddle_reader: + py_reader.decorate_paddle_reader(reader) + py_reader.start() + else: + thread = threading.Thread( + target=feed_data, args=(feed_queue, reader)) + thread.start() self.outputs = [] for _ in range(self.iterations): diff --git a/python/paddle/fluid/tests/unittests/test_regularizer.py b/python/paddle/fluid/tests/unittests/test_regularizer.py index 6727335c6059161d235a64a1b90d36b84004f9b3..20f91cf4485f2e79c20fe90143c8b7deebb9fc49 100644 --- 
a/python/paddle/fluid/tests/unittests/test_regularizer.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer.py @@ -55,7 +55,7 @@ class TestL2DecayRegularizer(unittest.TestCase): params_grads = optimizer.append_regularization_ops(params_grads) self.assertEqual(len(params_grads), 1) self.assertEqual(len(block.ops), count_ops + 2) - self.assertEqual(block.ops[-1].type, 'elementwise_add') + self.assertEqual(block.ops[-1].type, 'sum') self.assertEqual(block.ops[-2].type, 'scale') @@ -92,7 +92,7 @@ class TestL1DecayRegularizer(unittest.TestCase): params_grads = optimizer.append_regularization_ops(params_grads) self.assertEqual(len(params_grads), 1) self.assertEqual(len(block.ops), count_ops + 3) - self.assertEqual(block.ops[-1].type, 'elementwise_add') + self.assertEqual(block.ops[-1].type, 'sum') self.assertEqual(block.ops[-2].type, 'scale') self.assertEqual(block.ops[-3].type, 'sign') diff --git a/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py b/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py new file mode 100644 index 0000000000000000000000000000000000000000..5fdad44f1242b9ee99040b43d7ce2cf84664eed1 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_space_to_depth_op.py @@ -0,0 +1,135 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
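The test added here (test_space_to_depth_op.py, continued below) covers the new space_to_depth operator. A minimal usage sketch, not part of the diff: it assumes the op is exposed on the Python side as fluid.layers.space_to_depth(x, blocksize), matching the op type and the blocksize attribute the test uses, and the variable names are illustrative only:

import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[12, 6, 6], dtype='float32')
y = fluid.layers.space_to_depth(x=x, blocksize=2)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

data = np.random.random((32, 12, 6, 6)).astype('float32')
out, = exe.run(fluid.default_main_program(),
               feed={'x': data},
               fetch_list=[y])
# blocksize=2 folds each 2x2 spatial block into the channel dimension,
# so (32, 12, 6, 6) becomes (32, 48, 3, 3), matching TestSpaceToDepthOp below.
print(out.shape)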
+ +from __future__ import print_function +import unittest +import numpy as np +import paddle.fluid as fluid +from op_test import OpTest + + +class TestSpaceToDepthOp(OpTest): + @staticmethod + def helper(in_, width, height, channel, batch, blocksize, forward, out_): + channel_out = channel // (blocksize * blocksize) + for b in range(batch): + for k in range(channel): + for j in range(height): + for i in range(width): + in_index = i + width * (j + height * (k + channel * b)) + channel2 = k % channel_out + offset = k // channel_out + width2 = i * blocksize + offset % blocksize + height2 = j * blocksize + offset // blocksize + out_index = width2 + width * blocksize * ( + height2 + height * blocksize * + (channel2 + channel_out * b)) + if forward: + out_[out_index] = in_[in_index] + else: + out_[in_index] = in_[out_index] + + def setUp(self): + self.init_data() + + self.op_type = "space_to_depth" + self.inputs = {"X": self.x} + self.helper(self.x_1d, self.x.shape[3], self.x.shape[2], + self.x.shape[1], self.x.shape[0], self.blocksize, + self.forward, self.out_1d) + self.out = np.reshape(self.out_1d, self.infered_shape) + self.attrs = {"blocksize": self.blocksize} + self.outputs = {"Out": self.out} + + def init_data(self): + self.ori_shape = (32, 12, 6, 6) + self.infered_shape = (32, 48, 3, 3) + self.one_d_len = 32 * 48 * 3 * 3 + + self.blocksize = 2 + self.x = np.random.random(self.ori_shape).astype('float32') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float32') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + def test_check_output(self): + place = fluid.core.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.core.CPUPlace() + self.check_output_with_place(place, 1e-5, None, False) + + def test_check_grad(self): + place = fluid.core.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.core.CPUPlace() + self.check_grad_with_place(place, ['X'], 'Out') + + +class TestSpaceToDepthOpBasic(TestSpaceToDepthOp): + def init_data(self): + self.ori_shape = (32, 8, 6, 6) + self.infered_shape = (32, 32, 3, 3) + self.one_d_len = 32 * 32 * 3 * 3 + + self.blocksize = 2 + self.x = np.random.random(self.ori_shape).astype('float32') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float32') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + +class TestSpaceToDepthOpDoubleBasic(TestSpaceToDepthOp): + def init_data(self): + self.ori_shape = (32, 8, 6, 6) + self.infered_shape = (32, 32, 3, 3) + self.one_d_len = 32 * 32 * 3 * 3 + + self.blocksize = 2 + self.x = np.random.random(self.ori_shape).astype('float64') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float64') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + +class TestSpaceToDepthOpWithStride3(TestSpaceToDepthOp): + def init_data(self): + self.ori_shape = (32, 9, 6, 6) + self.infered_shape = (32, 81, 2, 2) + self.one_d_len = 32 * 81 * 2 * 2 + + self.blocksize = 3 + self.x = np.random.random(self.ori_shape).astype('float32') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float32') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + +class TestSpaceToDepthOpWithNotSquare(TestSpaceToDepthOp): + def init_data(self): + self.ori_shape = (32, 9, 9, 6) + self.infered_shape = (32, 81, 3, 2) + self.one_d_len = 32 * 81 * 3 * 2 + + 
self.blocksize = 3 + self.x = np.random.random(self.ori_shape).astype('float32') + self.x_1d = np.reshape(self.x, self.one_d_len) + self.out = np.zeros(self.infered_shape).astype('float32') + self.out_1d = np.reshape(self.out, self.one_d_len) + self.forward = 1 + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index 643878dc5c2c2854ad3a1b6429d78519b1670857..0be5be6e97d26c6ec42471d078e8e5995727e594 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -49,11 +49,14 @@ class TestSumOp(OpTest): class TestSelectedRowsSumOp(OpTest): - def check_with_place(self, place, inplace): + def setUp(self): self.height = 10 self.row_numel = 12 self.rows = [0, 1, 2, 3, 4, 5, 6] + self.dtype = np.float32 + self.init_kernel_type() + def check_with_place(self, place, inplace): self.check_input_and_optput(core.Scope(), place, inplace, True, True, True) self.check_input_and_optput(core.Scope(), place, inplace, False, True, @@ -64,12 +67,12 @@ class TestSelectedRowsSumOp(OpTest): False) def init_kernel_type(self): - self.dtype = np.float32 + pass - def _get_array(self, row_num, row_numel): - array = np.ones((row_num, row_numel)).astype(self.dtype) - for i in range(row_num): - array[i] *= i + def _get_array(self, rows, row_numel): + array = np.ones((len(rows), row_numel)).astype(self.dtype) + for i in range(len(rows)): + array[i] *= rows[i] return array def check_input_and_optput(self, @@ -105,7 +108,7 @@ class TestSelectedRowsSumOp(OpTest): self.assertTrue( np.array_equal( np.array(out.get_tensor()), - self._get_array(len(self.rows), self.row_numel) * + self._get_array(self.rows, self.row_numel) * has_data_w_num)) else: self.assertEqual(len(out.rows()), 0) @@ -121,7 +124,7 @@ class TestSelectedRowsSumOp(OpTest): w_selected_rows = var.get_selected_rows() w_selected_rows.set_height(self.height) w_selected_rows.set_rows(rows) - w_array = self._get_array(len(rows), self.row_numel) + w_array = self._get_array(self.rows, self.row_numel) w_tensor = w_selected_rows.get_tensor() w_tensor.set(w_array, place) @@ -136,36 +139,91 @@ class TestSelectedRowsSumOp(OpTest): self.check_with_place(place, inplace) +class TestLoDTensorAndSelectedRowsOp(TestSelectedRowsSumOp): + def setUp(self): + self.height = 10 + self.row_numel = 12 + self.rows = [0, 1, 2, 2, 4, 5, 6] + + def check_with_place(self, place, inplace): + scope = core.Scope() + if inplace: + self.create_lod_tensor(scope, place, "x1") + self.create_selected_rows(scope, place, "x2", True) + out = scope.var("x1").get_tensor() + out_name = "x1" + else: + self.create_selected_rows(scope, place, "x1", True) + self.create_lod_tensor(scope, place, "x2") + out = scope.var("out").get_tensor() + out_name = "out" + + # create and run sum operator + sum_op = Operator("sum", X=["x1", "x2"], Out=out_name) + sum_op.run(scope, place) + + result = np.ones((1, self.height)).astype(np.int32).tolist()[0] + for ele in self.rows: + result[ele] += 1 + + out_t = np.array(out) + self.assertEqual(out_t.shape[0], self.height) + self.assertTrue( + np.array_equal(out_t, + self._get_array([i for i in range( + self.height)], self.row_numel) * np.tile( + np.array(result).reshape(self.height, 1), + self.row_numel))) + + def create_lod_tensor(self, scope, place, var_name): + var = scope.var(var_name) + w_tensor = var.get_tensor() + w_array = self._get_array([i for i in range(self.height)], + self.row_numel) + 
w_tensor.set(w_array, place) + return var + + +#----------- test fp16 ----------- +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestFP16SumOp(TestSumOp): def init_kernel_type(self): self.dtype = np.float16 def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=2e-2) + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-2) # FIXME: Because of the precision fp16, max_relative_error # should be 0.15 here. def test_check_grad(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_grad(['x0'], 'Out', max_relative_error=0.15) + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad(['x0'], 'Out', max_relative_error=0.15) -class TestFP16SelectedRowsSumOp(TestSelectedRowsSumOp): - def init_kernel_type(self): - self.dtype = np.float16 +def create_test_sum_fp16_class(parent): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") + class TestSumFp16Case(parent): + def init_kernel_type(self): + self.dtype = np.float16 - def test_w_is_selected_rows(self): - if core.is_compiled_with_cuda(): + def test_w_is_selected_rows(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): for inplace in [True, False]: self.check_with_place(place, inplace) + cls_name = "{0}_{1}".format(parent.__name__, "SumFp16Test") + TestSumFp16Case.__name__ = cls_name + globals()[cls_name] = TestSumFp16Case + + +create_test_sum_fp16_class(TestSelectedRowsSumOp) +create_test_sum_fp16_class(TestLoDTensorAndSelectedRowsOp) if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 7c7fba76718e911907bb9bef69b3e8688bbf52fc..094eaeb59ce7ab73012f6e6a5fc24778933270c1 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -1706,13 +1706,27 @@ to transpile() call.") outputs=outputs, attrs=opt_op.all_attrs()) - def _is_splited_grad_var(self, var, var_dict): + def _get_pserver_grad_param_var(self, var, var_dict): + """ + Return pserver side grad/param variable, return None + if the variable is not grad/param, e.g. 
+ + a@GRAD -> a@GRAD.block0 + a@GRAD -> a@GRAD (a is not splited) + fc_0.w_0 -> fc_0.w_0.block_0 + fc_0.w_0 -> fc_0.w_0 (weight is not splited) + _generated_var_123 -> None + """ grad_block = None for _, g in six.iteritems(var_dict): if self._orig_varname(g.name) == self._orig_varname(var.name): + # skip per trainer vars if g.name.find(".trainer_") == -1: - grad_block = g - break + # only param or grads have splited blocks + if self._orig_varname(g.name) in self.grad_name_to_param_name or\ + self._orig_varname(g.name) in self.param_name_to_grad_name: + grad_block = g + break return grad_block def _clone_lr_op(self, program, block, op): @@ -1745,32 +1759,38 @@ to transpile() call.") for key, varlist in six.iteritems(inputs): if not isinstance(varlist, list): varlist = [varlist] - for var in varlist: - # for ops like clipping and weight decay, get the splited var + for i in range(len(varlist)): + var = varlist[i] + # for ops like clipping and weight decay, get the splited var (xxx.block0) # for inputs/outputs - grad_block = self._is_splited_grad_var( + grad_block = self._get_pserver_grad_param_var( var, program.global_block().vars) if grad_block: - inputs[key] = grad_block + varlist[i] = grad_block elif var.name not in program.global_block().vars: - program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) + tmpvar = program.global_block()._clone_variable(var) + varlist[i] = tmpvar + else: + varlist[i] = program.global_block().vars[var.name] + inputs[key] = varlist outputs = self._get_output_map_from_op( self.origin_program.global_block().vars, opt_op) for key, varlist in six.iteritems(outputs): if not isinstance(varlist, list): varlist = [varlist] - for var in varlist: - grad_block = self._is_splited_grad_var( + for i in range(len(varlist)): + var = varlist[i] + grad_block = self._get_pserver_grad_param_var( var, program.global_block().vars) if grad_block: - outputs[key] = grad_block + varlist[i] = grad_block elif var.name not in program.global_block().vars: - program.global_block()._clone_variable(var) + tmpvar = program.global_block()._clone_variable(var) + varlist[i] = tmpvar + else: + varlist[i] = program.global_block().vars[var.name] + outputs[key] = varlist return optimize_block.append_op( type=opt_op.type, diff --git a/python/setup.py.in b/python/setup.py.in index ee19294ad5c884cf73a4f14290f61f0b345ea8c7..b1ff9f3a5c3d877edb6bc6a12efce053a44b4c9c 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -14,7 +14,8 @@ RC = 0 def git_commit(): try: cmd = ['git', 'rev-parse', 'HEAD'] - git_commit = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip() + git_commit = subprocess.Popen(cmd, stdout = subprocess.PIPE, + cwd="@PADDLE_SOURCE_DIR@").communicate()[0].strip() except: git_commit = 'Unknown' git_commit = git_commit.decode() @@ -44,7 +45,7 @@ def get_patch(): def is_taged(): try: cmd = ['git', 'describe', '--exact-match', '--tags', 'HEAD', '2>/dev/null'] - git_tag = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip() + git_tag = subprocess.Popen(cmd, stdout = subprocess.PIPE, cwd="@PADDLE_SOURCE_DIR@").communicate()[0].strip() git_tag = git_tag.decode() except: return False @@ -55,8 +56,7 @@ def is_taged(): return False def write_version_py(filename='paddle/version.py'): - cnt = ''' -# THIS FILE IS GENERATED FROM PADDLEPADDLE SETUP.PY + cnt = '''# THIS FILE IS GENERATED FROM PADDLEPADDLE SETUP.PY # full_version = '%(major)d.%(minor)d.%(patch)s' major = '%(major)d'