diff --git a/CMakeLists.txt b/CMakeLists.txt index 7a7b5860a122a853fc9ce1da6494fc039b38bc10..ed704585d8a6bf3befd9a549aa5a62a33fea3da9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -62,13 +62,11 @@ option(WITH_DISTRIBUTE "Compile with distributed support" OFF) option(USE_EIGEN_FOR_BLAS "Use matrix multiplication in Eigen" OFF) option(EIGEN_USE_THREADS "Compile with multi-threaded Eigen" OFF) option(WITH_ARM_FP16 "Use half precision support on armv8.2-a cpu" OFF) -option(WITH_FAST_BUNDLE_TEST "Bundle tests that can be run in a single process together to reduce launch overhead" OFF) option(WITH_CONTRIB "Compile the third-party contributation" OFF) option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." OFF) option(WITH_ANAKIN "Compile with Anakin library" OFF) option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE}) option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocal" OFF) -option(WITH_INFERENCE "Compile fluid inference library" ON) option(ON_INFER "Turn on inference optimization." OFF) option(WITH_INFERENCE_API_TEST "Test fluid inference high-level api interface" OFF) option(WITH_SYSTEM_BLAS "Use system blas library" OFF) diff --git a/README.md b/README.md index 8ee67f66423df8bce27f70015be8752457cd9784..56d6c10c642787836abb55cb2974bda0b8d22da4 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,8 @@ [![Build Status](https://travis-ci.org/PaddlePaddle/Paddle.svg?branch=develop)](https://travis-ci.org/PaddlePaddle/Paddle) -[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://paddlepaddle.org/documentation/docs/en/1.0/getstarted/index_en.html) -[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/index.html) +[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://paddlepaddle.org/documentation/docs/en/1.1/getstarted/index_en.html) +[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://paddlepaddle.org/documentation/docs/zh/1.1/beginners_guide/index.html) [![Release](https://img.shields.io/github/release/PaddlePaddle/Paddle.svg)](https://github.com/PaddlePaddle/Paddle/releases) [![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) @@ -19,7 +19,7 @@ Our vision is to enable deep learning for everyone via PaddlePaddle. Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddle/releases) to track the latest feature of PaddlePaddle. -### Latest PaddlePaddle Release: [Fluid 1.0.1](https://github.com/PaddlePaddle/Paddle/tree/release/1.0.0) +### Latest PaddlePaddle Release: [Fluid 1.1.0](https://github.com/PaddlePaddle/Paddle/tree/release/1.1) ### Install Latest Stable Release: ``` # Linux CPU @@ -27,9 +27,9 @@ pip install paddlepaddle # Linux GPU cuda9cudnn7 pip install paddlepaddle-gpu # Linux GPU cuda8cudnn7 -pip install paddlepaddle-gpu==1.0.1.post87 +pip install paddlepaddle-gpu==1.1.0.post87 # Linux GPU cuda8cudnn5 -pip install paddlepaddle-gpu==1.0.1.post85 +pip install paddlepaddle-gpu==1.1.0.post85 # For installation on other platform, refer to http://paddlepaddle.org/ ``` @@ -76,26 +76,26 @@ pip install paddlepaddle-gpu==1.0.1.post85 ## Installation -It is recommended to read [this doc](http://paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/index.html) on our website. 
+It is recommended to read [this doc](http://paddlepaddle.org/documentation/docs/zh/1.1/beginners_guide/index.html) on our website. ## Documentation -We provide [English](http://paddlepaddle.org/documentation/docs/en/1.0.0/getstarted/index_en.html) and -[Chinese](http://paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/index.html) documentation. +We provide [English](http://paddlepaddle.org/documentation/docs/en/1.1/getstarted/index_en.html) and +[Chinese](http://paddlepaddle.org/documentation/docs/zh/1.1/beginners_guide/index.html) documentation. - [Deep Learning 101](https://github.com/PaddlePaddle/book) You might want to start from this online interactive book that can run in a Jupyter Notebook. -- [Distributed Training](http://paddlepaddle.org/documentation/docs/zh/1.0/user_guides/howto/training/cluster_howto.html) +- [Distributed Training](http://paddlepaddle.org/documentation/docs/zh/1.1/user_guides/howto/training/cluster_howto.html) You can run distributed training jobs on MPI clusters. -- [Python API](http://paddlepaddle.org/documentation/api/zh/1.0/fluid.html) +- [Python API](http://paddlepaddle.org/documentation/api/zh/1.1/fluid.html) Our new API enables much shorter programs. -- [How to Contribute](http://paddlepaddle.org/documentation/docs/zh/1.0/advanced_usage/development/contribute_to_paddle.html) +- [How to Contribute](http://paddlepaddle.org/documentation/docs/zh/1.1/advanced_usage/development/contribute_to_paddle.html) We appreciate your contributions! diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index db17cd004b5a393bb6b6af50b3169e39fa9b3a96..b6b7af951093e4d721e5d0c99e7bb818c67af749 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -64,11 +64,11 @@ paddle.fluid.layers.chunk_eval ArgSpec(args=['input', 'label', 'chunk_scheme', ' paddle.fluid.layers.sequence_conv ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'bias_attr', 'param_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(3, 1, None, None, None, None, None)) paddle.fluid.layers.conv2d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None)) paddle.fluid.layers.conv3d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None)) -paddle.fluid.layers.sequence_pool ArgSpec(args=['input', 'pool_type'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.sequence_pool ArgSpec(args=['input', 'pool_type', 'is_test'], varargs=None, keywords=None, defaults=(False,)) paddle.fluid.layers.sequence_softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)) paddle.fluid.layers.softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(True, None)) -paddle.fluid.layers.pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None)) -paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, 
True, False, None)) +paddle.fluid.layers.pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)) +paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)) paddle.fluid.layers.batch_norm ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False)) paddle.fluid.layers.beam_search_decode ArgSpec(args=['ids', 'scores', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.conv2d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)) @@ -103,7 +103,7 @@ paddle.fluid.layers.beam_search ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 's paddle.fluid.layers.row_conv ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None)) paddle.fluid.layers.multiplex ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.layer_norm ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None)) -paddle.fluid.layers.softmax_with_cross_entropy ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100)) +paddle.fluid.layers.softmax_with_cross_entropy ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode'], varargs=None, keywords=None, defaults=(False, -100, False)) paddle.fluid.layers.smooth_l1 ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)) paddle.fluid.layers.one_hot ArgSpec(args=['input', 'depth'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.autoincreased_step_counter ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)) @@ -174,9 +174,11 @@ paddle.fluid.layers.mean ArgSpec(args=['x', 'name'], varargs=None, keywords=None paddle.fluid.layers.mul ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None)) paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=['x', 'label', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.maxout ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.affine_grid ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.sequence_reverse ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.affine_channel ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 
'name'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None)) paddle.fluid.layers.hash ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None)) +paddle.fluid.layers.grid_sampler ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.log_loss ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None)) paddle.fluid.layers.add_position_encoding ArgSpec(args=['input', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)) diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt index 48b36df6499e59fe742766b5f81fd30a9fb8b900..7d48f0057140cf021a21ea7e304b7e38cc8b9ec2 100644 --- a/paddle/fluid/CMakeLists.txt +++ b/paddle/fluid/CMakeLists.txt @@ -9,8 +9,6 @@ add_subdirectory(pybind) add_subdirectory(recordio) endif(NOT WIN32) -if(WITH_INFERENCE) - # NOTE: please add subdirectory inference at last. - add_subdirectory(inference) - add_subdirectory(train) -endif() +# NOTE: please add subdirectory inference at last. +add_subdirectory(inference) +add_subdirectory(train) diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 17188ac5f301102ae79c6ace676b84ee66e28801..d8bc72e6b2fa38db06cb077ada9d7ec180299e8c 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -35,13 +35,15 @@ if(WITH_GPU) all_reduce_op_handle reduce_op_handle broadcast_op_handle data_balance_op_handle graph graph_helper pass) endif() +cc_library(sequential_execution_pass SRCS sequential_execution_pass.cc DEPS graph graph_helper pass) + cc_library(multi_devices_graph_pass SRCS multi_devices_graph_pass.cc DEPS multi_devices_helper computation_op_handle scale_loss_grad_op_handle rpc_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle data_balance_op_handle fused_broadcast_op_handle) if(WITH_GPU) - cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS graph framework_proto reference_count_pass) + cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS graph framework_proto reference_count_pass sequential_execution_pass) else() - cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS graph framework_proto) + cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS graph framework_proto sequential_execution_pass) endif() cc_library(threaded_ssa_graph_executor SRCS threaded_ssa_graph_executor.cc DEPS fetch_op_handle ssa_graph_executor scope @@ -56,6 +58,7 @@ cc_library(scope_buffered_ssa_graph_executor SRCS scope_buffered_ssa_graph_execu # device_context reduce_op_handle ) cc_library(fast_threaded_ssa_graph_executor SRCS fast_threaded_ssa_graph_executor.cc DEPS fetch_op_handle ssa_graph_executor scope simple_threadpool device_context) +cc_test(fused_broadcast_op_test SRCS fused_broadcast_op_handle_test.cc DEPS fused_broadcast_op_handle) cc_library(build_strategy SRCS build_strategy.cc DEPS graph_viz_pass multi_devices_graph_pass diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc index 7c5f5bd80a937bf1a1c891155764833d7b21c5c2..b8690156763e4037811245b8016982710445e6a2 100644 --- 
a/paddle/fluid/framework/details/all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc @@ -34,7 +34,7 @@ AllReduceOpHandle::AllReduceOpHandle(ir::Node *node, nccl_ctxs_(ctxs) { if (nccl_ctxs_) { for (auto &p : places_) { - this->dev_ctxes_[p] = nccl_ctxs_->DevCtx(p); + this->SetDeviceContext(p, nccl_ctxs_->DevCtx(p)); } } } @@ -46,7 +46,7 @@ AllReduceOpHandle::AllReduceOpHandle(ir::Node *node, #endif void AllReduceOpHandle::RunImpl() { - platform::RecordEvent record_event(Name(), dev_ctxes_.begin()->second); + platform::RecordEvent record_event(Name(), dev_ctxes_.cbegin()->second); if (NoDummyInputSize() == 1) { return; // No need to all reduce when GPU count = 1; @@ -127,7 +127,7 @@ void AllReduceOpHandle::RunImpl() { *local_scopes_[i]->FindVar(kLocalExecScopeName)->Get(); auto &p = places_[i]; auto *var = scope.FindVar(out_var_handles[i]->name_); - auto *dev_ctx = dev_ctxes_[p]; + auto *dev_ctx = dev_ctxes_.at(p); RunAndRecordEvent(p, [&trg, var, dev_ctx, p] { auto &tensor_gpu = *var->GetMutable(); diff --git a/paddle/fluid/framework/details/broadcast_op_handle.h b/paddle/fluid/framework/details/broadcast_op_handle.h index 020d351e891c7afab37c59c0ff8d8e5e7ba184f2..72180fac864256ddda076c57e50ab1083c113d32 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.h +++ b/paddle/fluid/framework/details/broadcast_op_handle.h @@ -44,7 +44,8 @@ struct BroadcastOpHandle : public OpHandleBase { nccl_ctxs_(nccl_ctxs) { if (nccl_ctxs_) { for (auto &p_ctx : nccl_ctxs_->contexts_) { - dev_ctxes_[platform::CUDAPlace(p_ctx.first)] = p_ctx.second.ctx_.get(); + this->SetDeviceContext(platform::CUDAPlace(p_ctx.first), + p_ctx.second.ctx_.get()); } } } diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.cc b/paddle/fluid/framework/details/broadcast_op_handle_test.cc index ab7412a19fbd13fa39dbae9af528d158cc9ddbd0..650de5a48de6b1fdab120cdeda563a169fd1a1c1 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle_test.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.cc @@ -12,232 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/framework/details/broadcast_op_handle.h" -#include "gtest/gtest.h" - -#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/framework/details/broadcast_op_handle_test.h" namespace paddle { namespace framework { namespace details { -namespace f = paddle::framework; -namespace p = paddle::platform; - -// test data amount -const f::DDim kDims = {20, 20}; - -struct TestBroadcastOpHandle { - std::vector> ctxs_; - std::vector local_scopes_; - std::vector param_scopes_; - Scope g_scope_; - std::unique_ptr op_handle_; - std::vector> vars_; - std::vector gpu_list_; - bool use_gpu_; -#ifdef PADDLE_WITH_CUDA - std::unique_ptr nccl_ctxs_; -#endif - - void WaitAll() { - for (size_t j = 0; j < ctxs_.size(); ++j) { - ctxs_[j]->Wait(); - } -#ifdef PADDLE_WITH_CUDA - if (nccl_ctxs_) { - nccl_ctxs_->WaitAll(); - } -#endif - } - - void InitCtxOnGpu(bool use_gpu) { - use_gpu_ = use_gpu; - if (use_gpu_) { -#ifdef PADDLE_WITH_CUDA - int count = p::GetCUDADeviceCount(); - if (count <= 1) { - LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " - "device count is " - << count; - exit(0); - } - for (int i = 0; i < count; ++i) { - auto p = p::CUDAPlace(i); - gpu_list_.push_back(p); - ctxs_.emplace_back(new p::CUDADeviceContext(p)); - } - nccl_ctxs_.reset(new platform::NCCLContextMap(gpu_list_)); -#else - PADDLE_THROW("CUDA is not support."); -#endif - } else { - int count = 8; - for (int i = 0; i < count; ++i) { - auto p = p::CPUPlace(); - gpu_list_.push_back(p); - ctxs_.emplace_back(new p::CPUDeviceContext(p)); - } -#ifdef PADDLE_WITH_CUDA - nccl_ctxs_.reset(nullptr); -#endif - } - } - - void InitBroadcastOp(size_t input_scope_idx) { - for (size_t j = 0; j < gpu_list_.size(); ++j) { - local_scopes_.push_back(&(g_scope_.NewScope())); - Scope& local_scope = local_scopes_.back()->NewScope(); - *local_scopes_.back() - ->Var(details::kLocalExecScopeName) - ->GetMutable() = &local_scope; - local_scope.Var("out"); - param_scopes_.emplace_back(&local_scope); - } - param_scopes_[input_scope_idx]->Var("input"); - - std::unique_ptr n = - ir::CreateNodeForTest("node0", ir::Node::Type::kOperation); - if (use_gpu_) { -#ifdef PADDLE_WITH_CUDA - op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_, gpu_list_, - nccl_ctxs_.get())); -#else - PADDLE_THROW("CUDA is not support."); -#endif - } else { -#ifdef PADDLE_WITH_CUDA - op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_, gpu_list_, - nccl_ctxs_.get())); -#else - op_handle_.reset( - new BroadcastOpHandle(n.get(), local_scopes_, gpu_list_)); -#endif - } - - std::unique_ptr v = - ir::CreateNodeForTest("node1", ir::Node::Type::kVariable); - auto* in_var_handle = new VarHandle(v.get(), 1, input_scope_idx, "input", - gpu_list_[input_scope_idx]); - vars_.emplace_back(in_var_handle); - op_handle_->AddInput(in_var_handle); - - // add dummy var - - std::unique_ptr v2 = - ir::CreateNodeForTest("node2", ir::Node::Type::kVariable); - vars_.emplace_back(new DummyVarHandle(v2.get())); - DummyVarHandle* dummy_var_handle = - static_cast(vars_.back().get()); - dummy_var_handle->ClearGeneratedOp(); - op_handle_->AddInput(dummy_var_handle); - - for (size_t j = 0; j < gpu_list_.size(); ++j) { - if (!use_gpu_) { - op_handle_->SetDeviceContext(gpu_list_[j], ctxs_[j].get()); - } - std::unique_ptr v3 = - ir::CreateNodeForTest("node3", ir::Node::Type::kVariable); - VarHandle* out_var_handle = - new VarHandle(v3.get(), 2, j, "out", gpu_list_[j]); - vars_.emplace_back(out_var_handle); - op_handle_->AddOutput(out_var_handle); - 
} - - // add dummy var - std::unique_ptr v4 = - ir::CreateNodeForTest("node4", ir::Node::Type::kVariable); - vars_.emplace_back(new DummyVarHandle(v4.get())); - DummyVarHandle* out_dummy_var_handle = - static_cast(vars_.back().get()); - out_dummy_var_handle->ClearGeneratedOp(); - op_handle_->AddOutput(out_dummy_var_handle); - } - - void TestBroadcastLodTensor(size_t input_scope_idx) { - auto in_var = param_scopes_[input_scope_idx]->FindVar("input"); - PADDLE_ENFORCE_NOT_NULL(in_var); - auto in_lod_tensor = in_var->GetMutable(); - in_lod_tensor->mutable_data(kDims, gpu_list_[input_scope_idx]); - - std::vector send_vector(static_cast(f::product(kDims))); - for (size_t k = 0; k < send_vector.size(); ++k) { - send_vector[k] = k; - } - f::LoD lod{{0, 10, 20}}; - paddle::framework::TensorFromVector( - send_vector, *(ctxs_[input_scope_idx]), in_lod_tensor); - in_lod_tensor->set_lod(lod); - in_lod_tensor->Resize(kDims); - - op_handle_->Run(false); - - WaitAll(); - - p::CPUPlace cpu_place; - for (size_t j = 0; j < gpu_list_.size(); ++j) { - auto out_var = param_scopes_[j]->FindVar("out"); - PADDLE_ENFORCE_NOT_NULL(out_var); - auto out_tensor = out_var->Get(); - PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal."); - - f::Tensor result_tensor; - f::TensorCopySync(out_tensor, cpu_place, &result_tensor); - float* ct = result_tensor.mutable_data(cpu_place); - - for (int64_t i = 0; i < f::product(kDims); ++i) { - ASSERT_NEAR(ct[i], send_vector[i], 1e-5); - } - } - } - - void TestBroadcastSelectedRows(size_t input_scope_idx) { - auto in_var = param_scopes_[input_scope_idx]->FindVar("input"); - PADDLE_ENFORCE_NOT_NULL(in_var); - auto in_selected_rows = in_var->GetMutable(); - auto value = in_selected_rows->mutable_value(); - value->mutable_data(kDims, gpu_list_[input_scope_idx]); - int height = static_cast(kDims[0]) * 2; - std::vector rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1, - 2, 4, 6, 3, 1, 1, 1, 1, 3, 7}; - in_selected_rows->set_height(height); - in_selected_rows->set_rows(rows); - - std::vector send_vector(static_cast(f::product(kDims))); - for (size_t k = 0; k < send_vector.size(); ++k) { - send_vector[k] = k; - } - paddle::framework::TensorFromVector( - send_vector, *(ctxs_[input_scope_idx]), value); - - op_handle_->Run(false); - - WaitAll(); - - p::CPUPlace cpu_place; - for (size_t j = 0; j < gpu_list_.size(); ++j) { - auto out_var = param_scopes_[j]->FindVar("out"); - PADDLE_ENFORCE_NOT_NULL(out_var); - auto& out_select_rows = out_var->Get(); - auto rt = out_select_rows.value(); - - PADDLE_ENFORCE_EQ(out_select_rows.height(), height, - "height is not equal."); - for (size_t k = 0; k < out_select_rows.rows().size(); ++k) { - PADDLE_ENFORCE_EQ(out_select_rows.rows()[k], rows[k]); - } - - f::Tensor result_tensor; - f::TensorCopySync(rt, cpu_place, &result_tensor); - float* ct = result_tensor.data(); - - for (int64_t i = 0; i < f::product(kDims); ++i) { - ASSERT_NEAR(ct[i], send_vector[i], 1e-5); - } - } - } -}; - TEST(BroadcastTester, TestCPUBroadcastTestLodTensor) { TestBroadcastOpHandle test_op; size_t input_scope_idx = 0; diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.h b/paddle/fluid/framework/details/broadcast_op_handle_test.h new file mode 100644 index 0000000000000000000000000000000000000000..1a2a9ac328c4a9b89bfb89106af81b9fb3ed3028 --- /dev/null +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.h @@ -0,0 +1,271 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "gtest/gtest.h" +#include "paddle/fluid/framework/details/broadcast_op_handle.h" + +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { + +namespace f = paddle::framework; +namespace p = paddle::platform; + +// test data amount +const f::DDim kDims = {20, 20}; + +struct TestBroadcastOpHandle { + std::vector> ctxs_; + std::vector local_scopes_; + std::vector param_scopes_; + Scope g_scope_; + std::unique_ptr op_handle_; + std::vector> vars_; + std::vector place_list_; + bool use_gpu_; +#ifdef PADDLE_WITH_CUDA + std::unique_ptr nccl_ctxs_; +#endif + + void WaitAll() { + for (size_t j = 0; j < ctxs_.size(); ++j) { + ctxs_[j]->Wait(); + } +#ifdef PADDLE_WITH_CUDA + if (nccl_ctxs_) { + nccl_ctxs_->WaitAll(); + } +#endif + } + + void InitCtxOnGpu(bool use_gpu) { + use_gpu_ = use_gpu; + if (use_gpu_) { +#ifdef PADDLE_WITH_CUDA + int count = p::GetCUDADeviceCount(); + if (count <= 1) { + LOG(WARNING) << "Cannot test multi-gpu Broadcast, because the CUDA " + "device count is " + << count; + exit(0); + } + for (int i = 0; i < count; ++i) { + auto p = p::CUDAPlace(i); + place_list_.push_back(p); + ctxs_.emplace_back(new p::CUDADeviceContext(p)); + } + nccl_ctxs_.reset(new platform::NCCLContextMap(place_list_)); +#else + PADDLE_THROW("CUDA is not support."); +#endif + } else { + int count = 8; + for (int i = 0; i < count; ++i) { + auto p = p::CPUPlace(); + place_list_.push_back(p); + ctxs_.emplace_back(new p::CPUDeviceContext(p)); + } +#ifdef PADDLE_WITH_CUDA + nccl_ctxs_.reset(nullptr); +#endif + } + } + + void InitBroadcastOp(size_t input_scope_idx) { + for (size_t j = 0; j < place_list_.size(); ++j) { + local_scopes_.push_back(&(g_scope_.NewScope())); + Scope& local_scope = local_scopes_.back()->NewScope(); + *local_scopes_.back() + ->Var(details::kLocalExecScopeName) + ->GetMutable() = &local_scope; + local_scope.Var("out"); + param_scopes_.emplace_back(&local_scope); + } + param_scopes_[input_scope_idx]->Var("input"); + + std::unique_ptr n = + ir::CreateNodeForTest("node0", ir::Node::Type::kOperation); + if (use_gpu_) { +#ifdef PADDLE_WITH_CUDA + op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_, + place_list_, nccl_ctxs_.get())); +#else + PADDLE_THROW("CUDA is not support."); +#endif + } else { +#ifdef PADDLE_WITH_CUDA + op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_, + place_list_, nccl_ctxs_.get())); +#else + op_handle_.reset( + new BroadcastOpHandle(n.get(), local_scopes_, place_list_)); +#endif + } + + std::unique_ptr v = + ir::CreateNodeForTest("node1", ir::Node::Type::kVariable); + auto* in_var_handle = new VarHandle(v.get(), 1, input_scope_idx, "input", + place_list_[input_scope_idx]); + vars_.emplace_back(in_var_handle); + op_handle_->AddInput(in_var_handle); + + // add dummy var + + std::unique_ptr v2 = + ir::CreateNodeForTest("node2", ir::Node::Type::kVariable); + vars_.emplace_back(new 
DummyVarHandle(v2.get())); + DummyVarHandle* dummy_var_handle = + static_cast(vars_.back().get()); + dummy_var_handle->ClearGeneratedOp(); + op_handle_->AddInput(dummy_var_handle); + + for (size_t j = 0; j < place_list_.size(); ++j) { + if (!use_gpu_) { + op_handle_->SetDeviceContext(place_list_[j], ctxs_[j].get()); + } + std::unique_ptr v3 = + ir::CreateNodeForTest("node3", ir::Node::Type::kVariable); + VarHandle* out_var_handle = + new VarHandle(v3.get(), 2, j, "out", place_list_[j]); + vars_.emplace_back(out_var_handle); + op_handle_->AddOutput(out_var_handle); + } + + // add dummy var + std::unique_ptr v4 = + ir::CreateNodeForTest("node4", ir::Node::Type::kVariable); + vars_.emplace_back(new DummyVarHandle(v4.get())); + DummyVarHandle* out_dummy_var_handle = + static_cast(vars_.back().get()); + out_dummy_var_handle->ClearGeneratedOp(); + op_handle_->AddOutput(out_dummy_var_handle); + } + + std::vector InitLoDTensor(const std::string& varname, + size_t input_scope_idx, const f::LoD& lod, + float val_scalar = 0.0) { + auto var = param_scopes_[input_scope_idx]->FindVar(varname); + + PADDLE_ENFORCE_NOT_NULL(var); + auto lod_tensor = var->GetMutable(); + std::vector send_vector(static_cast(f::product(kDims))); + for (size_t k = 0; k < send_vector.size(); ++k) { + send_vector[k] = k + val_scalar; + } + paddle::framework::TensorFromVector( + send_vector, *(ctxs_[input_scope_idx]), lod_tensor); + lod_tensor->set_lod(lod); + lod_tensor->Resize(kDims); + return send_vector; + } + + std::vector InitSelectedRows(const std::string& varname, + size_t input_scope_idx, + const std::vector& rows, + int height, float value_scalar = 0.0) { + std::vector send_vector(static_cast(f::product(kDims))); + for (size_t k = 0; k < send_vector.size(); ++k) { + send_vector[k] = k + value_scalar; + } + + auto var = param_scopes_[input_scope_idx]->FindVar(varname); + PADDLE_ENFORCE_NOT_NULL(var); + auto selected_rows = var->GetMutable(); + auto value = selected_rows->mutable_value(); + value->mutable_data(kDims, place_list_[input_scope_idx]); + selected_rows->set_height(height); + selected_rows->set_rows(rows); + + paddle::framework::TensorFromVector( + send_vector, *(ctxs_[input_scope_idx]), value); + + return send_vector; + } + + void SelectedRowsEqual(const std::string& varname, int input_scope_idx, + const std::vector& send_vector, + const std::vector& rows, int height) { + auto var = param_scopes_[input_scope_idx]->FindVar(varname); + PADDLE_ENFORCE_NOT_NULL(var); + auto& selected_rows = var->Get(); + auto rt = selected_rows.value(); + PADDLE_ENFORCE_EQ(selected_rows.height(), height, "height is not equal."); + + for (size_t k = 0; k < selected_rows.rows().size(); ++k) { + PADDLE_ENFORCE_EQ(selected_rows.rows()[k], rows[k]); + } + + p::CPUPlace cpu_place; + f::Tensor result_tensor; + f::TensorCopySync(rt, cpu_place, &result_tensor); + float* ct = result_tensor.data(); + + for (int64_t i = 0; i < f::product(kDims); ++i) { + ASSERT_NEAR(ct[i], send_vector[i], 1e-5); + } + } + + void LoDTensorEqual(const std::string& varname, + const std::vector& send_vec, const f::LoD& lod, + framework::Scope* scope) { + p::CPUPlace cpu_place; + auto var = scope->FindVar(varname); + PADDLE_ENFORCE_NOT_NULL(var); + auto tensor = var->Get(); + PADDLE_ENFORCE_EQ(tensor.lod(), lod, "lod is not equal."); + f::Tensor result_tensor; + f::TensorCopySync(tensor, cpu_place, &result_tensor); + float* ct = result_tensor.mutable_data(cpu_place); + for (int64_t k = 0; k < f::product(kDims); ++k) { + ASSERT_NEAR(ct[k], send_vec[k], 1e-5); + } + 
} + + void TestBroadcastLodTensor(size_t input_scope_idx) { + f::LoD lod{{0, 10, 20}}; + auto send_vector = InitLoDTensor("input", input_scope_idx, lod); + + op_handle_->Run(false); + + WaitAll(); + for (size_t j = 0; j < place_list_.size(); ++j) { + LoDTensorEqual("out", send_vector, lod, param_scopes_[j]); + } + } + + void TestBroadcastSelectedRows(size_t input_scope_idx) { + std::vector rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1, + 2, 4, 6, 3, 1, 1, 1, 1, 3, 7}; + int height = static_cast(kDims[0] * 2); + auto send_vector = InitSelectedRows("input", input_scope_idx, rows, height); + + op_handle_->Run(false); + + WaitAll(); + for (size_t j = 0; j < place_list_.size(); ++j) { + SelectedRowsEqual("out", input_scope_idx, send_vector, rows, height); + } + } +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index fefd27fc86fb8dce3311fa580d90f518906dd862..bc19bd36610bf144f163c8ebf582d4afbc6592e3 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -16,6 +16,7 @@ limitations under the License. */ #include "paddle/fluid/framework/details/multi_devices_graph_check_pass.h" #include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h" +#include "paddle/fluid/framework/details/sequential_execution_pass.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_viz_pass.h" @@ -27,6 +28,10 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder { public: explicit ParallelExecutorPassBuilder(const BuildStrategy &strategy) : ir::PassBuilder(), strategy_(strategy) { + if (strategy_.enable_sequential_execution_) { + AppendPass("sequential_execution_pass"); + } + // Add a graph viz pass to record a graph. if (!strategy_.debug_graphviz_path_.empty()) { auto viz_pass = AppendPass("graph_viz_pass"); @@ -110,6 +115,11 @@ std::unique_ptr BuildStrategy::Apply( pass->Erase("nccl_ctxs"); pass->SetNotOwned("nccl_ctxs", nctx); #endif + } else if (pass->Type() == "sequential_execution_pass") { + pass->Erase(kAllOpDescs); + pass->Set>( + kAllOpDescs, + new std::vector(main_program.Block(0).AllOps())); } graph = pass->Apply(std::move(graph)); } @@ -125,3 +135,4 @@ USE_PASS(multi_batch_merge_pass); USE_PASS(multi_devices_pass); USE_PASS(multi_devices_check_pass); USE_PASS(multi_devices_print_pass); +USE_PASS(sequential_execution_pass); diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h index f3ffaf6ecd7c4dd99c40fe58ba88c0cbdc14bde7..88459320b0eb6d6c4405bff4c8b13c99aa7edb0d 100644 --- a/paddle/fluid/framework/details/build_strategy.h +++ b/paddle/fluid/framework/details/build_strategy.h @@ -69,6 +69,8 @@ struct BuildStrategy { bool enable_data_balance_{false}; + bool enable_sequential_execution_{false}; + bool fuse_broadcast_op_{false}; // User normally doesn't need to call this API. 
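The build_strategy hunks above wire a new `sequential_execution_pass` into `BuildStrategy` via the `enable_sequential_execution_` flag. A minimal usage sketch follows, assuming the flag is surfaced to Python as `BuildStrategy.enable_sequential_execution` (the pybind change is not part of this excerpt):

```python
import paddle.fluid as fluid

# Build a tiny network so the sketch is self-contained.
x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(y)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

place = fluid.CPUPlace()
fluid.Executor(place).run(fluid.default_startup_program())

build_strategy = fluid.BuildStrategy()
# Assumed Python-side name for the new C++ field enable_sequential_execution_.
build_strategy.enable_sequential_execution = True

# With the flag on, ops are forced to run in program order in the executor graph.
exe = fluid.ParallelExecutor(use_cuda=False,
                             loss_name=loss.name,
                             build_strategy=build_strategy)
```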
diff --git a/paddle/fluid/framework/details/computation_op_handle.cc b/paddle/fluid/framework/details/computation_op_handle.cc index b6282debdb4eb6b1f29c39e54ac4f3e2296838da..f9bbfe0016ce0ea0d15a83cb532c44518549b8ad 100644 --- a/paddle/fluid/framework/details/computation_op_handle.cc +++ b/paddle/fluid/framework/details/computation_op_handle.cc @@ -37,7 +37,7 @@ void ComputationOpHandle::RunImpl() { bool ComputationOpHandle::NeedWait(VarHandleBase *in_var) { bool need_wait = in_var && in_var->GeneratedOp() && - in_var->GeneratedOp()->DeviceContext(place_) != dev_ctxes_[place_]; + in_var->GeneratedOp()->DeviceContext(place_) != dev_ctxes_.at(place_); return need_wait; } diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc index 525d24322442ef4dd6e8c24212af61c908959b87..0b772f9b63e2cfb78175f5e0d7011db8e6a5ec20 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.cc +++ b/paddle/fluid/framework/details/data_balance_op_handle.cc @@ -28,7 +28,7 @@ DataBalanceOpHandle::DataBalanceOpHandle( : OpHandleBase(node), local_scopes_(local_scopes), places_(places) { if (ctxs) { for (auto &p : places_) { - this->dev_ctxes_[p] = ctxs->DevCtx(p); + this->SetDeviceContext(p, ctxs->DevCtx(p)); } } } @@ -89,8 +89,8 @@ void DataBalanceOpHandle::RunImpl() { PADDLE_ENFORCE_GT(places_.size(), 1, "Data balance can only be enabled when the number of " "places to run larger than 1."); - auto in_var_handles = DynamicCast(inputs_); - auto out_var_handles = DynamicCast(outputs_); + auto in_var_handles = DynamicCast(this->Inputs()); + auto out_var_handles = DynamicCast(this->Outputs()); PADDLE_ENFORCE(in_var_handles.size() % places_.size() == 0); PADDLE_ENFORCE_EQ( in_var_handles.size(), out_var_handles.size(), diff --git a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc index 6e22fedf1c39428528c00cce4c9a4460dfb95cb3..98fc390e72fab3701538fd6f974460fa5114fdb0 100644 --- a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc @@ -92,13 +92,13 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run( size_t num_complete = 0; remaining_ = 0; - BlockingQueue complete_q; + auto complete_q = std::make_shared>(); for (auto op : bootstrap_ops_) { - RunOpAsync(op_deps.get(), op, &complete_q); + RunOpAsync(op_deps.get(), op, complete_q); } while (num_complete != op_deps->size()) { - size_t num_comp = complete_q.Pop(); + size_t num_comp = complete_q->Pop(); if (num_comp == -1UL) { int remaining = 0; while (true) { @@ -107,7 +107,7 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run( break; } for (int i = 0; i < remaining; ++i) { - complete_q.Pop(); + complete_q->Pop(); } } exception_.ReThrow(); @@ -120,7 +120,8 @@ FeedFetchList FastThreadedSSAGraphExecutor::Run( } void FastThreadedSSAGraphExecutor::RunOpAsync( std::unordered_map> *op_deps, - OpHandleBase *op, BlockingQueue *complete_q) { + OpHandleBase *op, + const std::shared_ptr> &complete_q) { ++remaining_; this->pool_.enqueue([=] { OpHandleBase *op_to_run = op; @@ -144,7 +145,7 @@ void FastThreadedSSAGraphExecutor::RunOpAsync( if (op_to_run == nullptr) { op_to_run = pending_op; } else { - this->RunOpAsync(op_deps, pending_op, complete_q); + RunOpAsync(op_deps, pending_op, complete_q); } } } @@ -156,8 +157,7 @@ void FastThreadedSSAGraphExecutor::RunOpAsync( } void FastThreadedSSAGraphExecutor::PrepareAtomicOpDeps() { 
atomic_op_deps_ = pool_.enqueue([&] { - std::unordered_map<OpHandleBase *, std::atomic<int>> *op_deps = - new std::unordered_map<OpHandleBase *, std::atomic<int>>; + auto *op_deps = new std::unordered_map<OpHandleBase *, std::atomic<int>>; for (auto &pair : op_deps_) { (*op_deps)[pair.first] = pair.second; } diff --git a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h index dad3a231cba6402f57ba654a9ac5fb520b9c8f04..8b8382447105c8caa36963214684d6ee9fa15200 100644 --- a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h @@ -50,7 +50,8 @@ class FastThreadedSSAGraphExecutor : public SSAGraphExecutor { std::atomic<int> remaining_; void RunOpAsync(std::unordered_map<OpHandleBase *, std::atomic<int>> *op_deps, - OpHandleBase *op, BlockingQueue<size_t> *complete_q); + OpHandleBase *op, + const std::shared_ptr<BlockingQueue<size_t>> &complete_q); void PrepareAtomicOpDeps(); diff --git a/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..0f12bd2b4e857648342aeb5ad33b6c0fe01c9c73 --- /dev/null +++ b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc @@ -0,0 +1,165 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +#include "paddle/fluid/framework/details/fused_broadcast_op_handle.h" +#include "gtest/gtest.h" +#include "paddle/fluid/framework/details/broadcast_op_handle_test.h" + +namespace paddle { +namespace framework { +namespace details { + +struct TestFusedBroadcastOpHandle : TestBroadcastOpHandle { + std::vector out_varnames_; + + void InitFusedBroadcastOp(std::vector input_scope_idxes) { + // initialize scope and var + for (size_t i = 0; i < place_list_.size(); ++i) { + local_scopes_.push_back(&(g_scope_.NewScope())); + Scope& local_scope = local_scopes_.back()->NewScope(); + *local_scopes_.back() + ->Var(details::kLocalExecScopeName) + ->GetMutable() = &local_scope; + for (size_t j = 0; j < input_scope_idxes.size(); ++j) { + local_scope.Var("out_var" + j); + if (i == j) local_scope.Var("in_var" + j); + } + param_scopes_.emplace_back(&local_scope); + } + + // create op handle node + std::unique_ptr n = + ir::CreateNodeForTest("fused_broadcast", ir::Node::Type::kOperation); + if (use_gpu_) { +#ifdef PADDLE_WITH_CUDA + op_handle_.reset(new FusedBroadcastOpHandle( + n.get(), local_scopes_, place_list_, nccl_ctxs_.get())); +#else + PADDLE_THROW("CUDA is not supported."); +#endif + } else { +#ifdef PADDLE_WITH_CUDA + op_handle_.reset(new FusedBroadcastOpHandle( + n.get(), local_scopes_, place_list_, nccl_ctxs_.get())); +#else + op_handle_.reset( + new FusedBroadcastOpHandle(n.get(), local_scopes_, place_list_)); +#endif + } + + for (size_t i = 0; i < input_scope_idxes.size(); ++i) { + // add input var handle + std::unique_ptr in_node = + ir::CreateNodeForTest("in_node" + i, ir::Node::Type::kVariable); + VarHandle* in_var_handle = + new VarHandle(in_node.get(), 1, input_scope_idxes[i], "in_var" + i, + place_list_[input_scope_idxes[i]]); + vars_.emplace_back(in_var_handle); + op_handle_->AddInput(in_var_handle); + + // add output var handle + for (size_t j = 0; j < place_list_.size(); ++j) { + std::unique_ptr out_node = + ir::CreateNodeForTest("out_node" + i, ir::Node::Type::kVariable); + VarHandle* out_var_handle = + new VarHandle(out_node.get(), 2, j, "out_var" + i, place_list_[j]); + vars_.emplace_back(out_var_handle); + op_handle_->AddOutput(out_var_handle); + } + } + } + + void TestFusedBroadcastLoDTensor(std::vector input_scope_idxes) { + std::vector> send_vec; + f::LoD lod{{0, 10, 20}}; + for (size_t i = 0; i < input_scope_idxes.size(); ++i) { + const std::string varname("in_var" + i); + float val_scalar = static_cast(i); + send_vec.push_back( + InitLoDTensor(varname, input_scope_idxes[i], lod, val_scalar)); + } + + op_handle_->Run(false); + + WaitAll(); + for (size_t i = 0; i < input_scope_idxes.size(); ++i) { + const std::string& varname("out_var" + i); + for (size_t j = 0; j < place_list_.size(); ++j) { + LoDTensorEqual(varname, send_vec[i], lod, param_scopes_[j]); + } + } + } + + void TestFusedBroadcastSelectedRows(std::vector input_scope_idxes) { + std::vector> send_vector; + std::vector rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1, + 2, 4, 6, 3, 1, 1, 1, 1, 3, 7}; + int height = static_cast(kDims[0] * 2); + for (size_t i = 0; i < input_scope_idxes.size(); ++i) { + const std::string varname("in_var" + i); + float val_scalar = static_cast(i); + send_vector.push_back(InitSelectedRows(varname, input_scope_idxes[i], + rows, height, val_scalar)); + } + + op_handle_->Run(false); + + WaitAll(); + for (size_t i = 0; i < input_scope_idxes.size(); ++i) { + const std::string& varname("out_var" + i); + for (size_t j = 0; j < place_list_.size(); ++j) { + SelectedRowsEqual(varname, input_scope_idxes[i], 
send_vector[i], rows, + height); + } + } + } +}; + +TEST(FusedBroadcastTester, CPULodTensor) { + TestFusedBroadcastOpHandle test_op; + std::vector input_scope_idxes = {0, 1}; + test_op.InitCtxOnGpu(false); + test_op.InitFusedBroadcastOp(input_scope_idxes); + test_op.TestFusedBroadcastLoDTensor(input_scope_idxes); +} + +TEST(FusedBroadcastTester, CPUSelectedRows) { + TestFusedBroadcastOpHandle test_op; + std::vector input_scope_idxes = {0, 1}; + test_op.InitCtxOnGpu(false); + test_op.InitFusedBroadcastOp(input_scope_idxes); + test_op.TestFusedBroadcastSelectedRows(input_scope_idxes); +} + +#ifdef PADDLE_WITH_CUDA +TEST(FusedBroadcastTester, GPULodTensor) { + TestFusedBroadcastOpHandle test_op; + std::vector input_scope_idxes = {0, 1}; + test_op.InitCtxOnGpu(true); + test_op.InitFusedBroadcastOp(input_scope_idxes); + test_op.TestFusedBroadcastLoDTensor(input_scope_idxes); +} + +TEST(FusedBroadcastTester, GPUSelectedRows) { + TestFusedBroadcastOpHandle test_op; + std::vector input_scope_idxes = {0, 1}; + test_op.InitCtxOnGpu(true); + test_op.InitFusedBroadcastOp(input_scope_idxes); + test_op.TestFusedBroadcastSelectedRows(input_scope_idxes); +} +#endif + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/gather_op_handle.cc b/paddle/fluid/framework/details/gather_op_handle.cc index 9aae19fc73de4387186da47c55710c94d53f1b88..ca4633c5a8f22fc9f7319b06aa766f9fe37dc68c 100644 --- a/paddle/fluid/framework/details/gather_op_handle.cc +++ b/paddle/fluid/framework/details/gather_op_handle.cc @@ -36,7 +36,7 @@ void GatherOpHandle::RunImpl() { VarHandle *out_var_handle; { - auto out_var_handles = DynamicCast(outputs_); + auto out_var_handles = DynamicCast(this->Outputs()); PADDLE_ENFORCE_EQ(out_var_handles.size(), 1, "The number of output should be one."); out_var_handle = out_var_handles.front(); @@ -99,7 +99,7 @@ void GatherOpHandle::RunImpl() { Tensor *out_tensor = out_value->mutable_value(); // copy - auto dev_ctx = dev_ctxes_[out_var_handle->place_]; + auto dev_ctx = dev_ctxes_.at(out_var_handle->place_); RunAndRecordEvent(out_var_handle->place_, [in_tensors, out_tensor, &dev_ctx, t_out_p] { int s = 0, e = 0; diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 3812f0abf1b7069525c4420054c61c01c908acfe..4822627ac3b65972f41d9a23d9fe3dba3de3f97d 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -103,7 +103,7 @@ void OpHandleBase::WaitInputVarGenerated() { void OpHandleBase::WaitInputVarGenerated(const platform::Place &place) { for (auto *in : inputs_) { if (NeedWait(in)) { - in->GeneratedOp()->RecordWaitEventOnCtx(dev_ctxes_[place]); + in->GeneratedOp()->RecordWaitEventOnCtx(dev_ctxes_.at(place)); } } } diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc index 7fc06f234d42a992328c0b6164f17945d8075c28..4503123eac810917cabcf1e62cff98552ed2f742 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.cc +++ b/paddle/fluid/framework/details/reduce_op_handle.cc @@ -27,7 +27,7 @@ namespace framework { namespace details { void ReduceOpHandle::RunImpl() { - platform::RecordEvent record_event(Name(), dev_ctxes_.begin()->second); + platform::RecordEvent record_event(Name(), dev_ctxes_.cbegin()->second); if (places_.size() == 1) return; // the input and output may have dummy var. 
diff --git a/paddle/fluid/framework/details/reduce_op_handle.h b/paddle/fluid/framework/details/reduce_op_handle.h index a6289b055f97b7b0e57928358d84117b33cf2df8..999828ae457ba43541da06088ce7c25331fd05ec 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.h +++ b/paddle/fluid/framework/details/reduce_op_handle.h @@ -46,7 +46,8 @@ struct ReduceOpHandle : public OpHandleBase { nccl_ctxs_(nccl_ctxs) { if (nccl_ctxs_) { for (auto &p_ctx : nccl_ctxs_->contexts_) { - dev_ctxes_[platform::CUDAPlace(p_ctx.first)] = p_ctx.second.ctx_.get(); + this->SetDeviceContext(platform::CUDAPlace(p_ctx.first), + p_ctx.second.ctx_.get()); } } } diff --git a/paddle/fluid/framework/details/rpc_op_handle.cc b/paddle/fluid/framework/details/rpc_op_handle.cc index f44b374edb29228dff5a8bf003d945291f166d49..65df7f2d510bf4e3e930398182c6dd1eae89241f 100644 --- a/paddle/fluid/framework/details/rpc_op_handle.cc +++ b/paddle/fluid/framework/details/rpc_op_handle.cc @@ -38,7 +38,7 @@ void RPCOpHandle::RunImpl() { continue; } if (in->GeneratedOp()) { - in->GeneratedOp()->RecordWaitEventOnCtx(dev_ctxes_[p]); + in->GeneratedOp()->RecordWaitEventOnCtx(dev_ctxes_.at(p)); } } auto &tmp_scope = local_scope_->FindVar(kLocalExecScopeName)->Get(); diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc index ba243979b34aa1f683de707525403becaf0a1c00..ef1626599795a553e654fe5d3ed74ef3a3a67d78 100644 --- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc @@ -27,7 +27,7 @@ ScaleLossGradOpHandle::ScaleLossGradOpHandle(ir::Node *node, size_t num_dev, coeff_(static_cast(1.0 / num_dev)), scope_(scope), place_(place) { - dev_ctxes_[place_] = dev_ctx; + this->SetDeviceContext(place_, dev_ctx); } ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {} @@ -46,9 +46,9 @@ void ScaleLossGradOpHandle::RunImpl() { } else { #ifdef PADDLE_WITH_CUDA this->RunAndRecordEvent([&] { - auto stream = - static_cast(this->dev_ctxes_[place_]) - ->stream(); + auto stream = static_cast( + this->dev_ctxes_.at(place_)) + ->stream(); memory::Copy(boost::get(place_), tmp, platform::CPUPlace(), &coeff_, sizeof(float), stream); VLOG(10) << place_ << "RUN Scale loss grad op"; diff --git a/paddle/fluid/framework/details/sequential_execution_pass.cc b/paddle/fluid/framework/details/sequential_execution_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..cc2c8bfef9f9f54c2e499467df0d22ce3f69d6b8 --- /dev/null +++ b/paddle/fluid/framework/details/sequential_execution_pass.cc @@ -0,0 +1,109 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/framework/details/sequential_execution_pass.h" +#include +#include +#include +#include +#include "paddle/fluid/framework/op_proto_maker.h" + +namespace paddle { +namespace framework { +namespace details { + +static bool IsSameOpDesc(OpDesc *op1, OpDesc *op2) { + return op1->Type() == op2->Type() && op1->Inputs() == op2->Inputs() && + op1->Outputs() == op2->Outputs(); +} + +std::unique_ptr SequentialExecutionPass::ApplyImpl( + std::unique_ptr graph) const { + // FIXME(zjl): Insert dependencies between some distributed ops may cause + // the multi_devices_graph_pass fails. So we skip these ops here. + // Indeed, maybe we should not insert dependencies between these ops + // casually, which may cause deadlock easily. + // We should add more skipped distributed ops when found errors in + // multi_devices_graph_pass + static std::unordered_set skip_dist_ops{ + "send", "recv", "send_barrier", "fetch_barrier"}; + + auto &ops = Get>(kAllOpDescs); + std::vector op_node_list; + op_node_list.reserve(ops.size()); + + std::unordered_map op_deps; + std::unordered_map> pending_ops; + std::unordered_set ready_ops; + + for (ir::Node *node : graph->Nodes()) { + if (!node->IsOp()) continue; + std::unordered_set preceding_ops; + for (auto *in : node->inputs) { + PADDLE_ENFORCE(in->IsVar(), + "Preceding Node of Op Nodes must be Var Node"); + if (in->inputs.empty()) continue; + PADDLE_ENFORCE(in->inputs.size() == 1 && in->inputs[0]->IsOp(), + "Preceding Op Node of Var Node must be unique"); + preceding_ops.insert(in->inputs[0]); + pending_ops[in->inputs[0]].insert(node); + } + op_deps[node] = preceding_ops.size(); + if (preceding_ops.empty()) { + ready_ops.insert(node); + } + } + + for (auto *op_desc : ops) { + ir::Node *found_node = nullptr; + for (auto *node : ready_ops) { + if (IsSameOpDesc(op_desc, node->Op())) { + PADDLE_ENFORCE(found_node == nullptr, + "Found multiple op_desc in graph: %s", op_desc->Type()); + found_node = node; + } + } + + PADDLE_ENFORCE_NOT_NULL(found_node, "Cannot find op_desc in graph: %s", + op_desc->Type()); + for (auto *pending_op : pending_ops[found_node]) { + if (--op_deps.at(pending_op) == 0) { + ready_ops.insert(pending_op); + } + } + ready_ops.erase(found_node); + if (skip_dist_ops.count(op_desc->Type()) == 0) { + op_node_list.push_back(found_node); + } + } + + for (size_t i = 1; i < op_node_list.size(); ++i) { + auto *dep_var = graph->CreateControlDepVar(); + op_node_list[i]->inputs.push_back(dep_var); + op_node_list[i - 1]->outputs.push_back(dep_var); + dep_var->outputs.push_back(op_node_list[i]); + dep_var->inputs.push_back(op_node_list[i - 1]); + VLOG(10) << "Add dependencies between " << op_node_list[i - 1]->Name() + << " and " << op_node_list[i]->Name(); + } + return graph; +} + +} // namespace details +} // namespace framework +} // namespace paddle + +REGISTER_PASS(sequential_execution_pass, + paddle::framework::details::SequentialExecutionPass) + .RequirePassAttr(paddle::framework::details::kAllOpDescs); diff --git a/paddle/fluid/framework/details/sequential_execution_pass.h b/paddle/fluid/framework/details/sequential_execution_pass.h new file mode 100644 index 0000000000000000000000000000000000000000..a04c08bc2eb3bae797d648b30a22a5fee7ba0eaa --- /dev/null +++ b/paddle/fluid/framework/details/sequential_execution_pass.h @@ -0,0 +1,34 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/pass.h" + +namespace paddle { +namespace framework { +namespace details { + +constexpr char kAllOpDescs[] = "all_op_descs"; + +class SequentialExecutionPass : public ir::Pass { + protected: + std::unique_ptr<ir::Graph> ApplyImpl( + std::unique_ptr<ir::Graph> graph) const override; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 31beef3ae829d72570ee7c879dac71ed600cd216..dc63effd1b7c8fe5bb3fc91058eb855e552d3926 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -39,7 +39,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( new platform::RecordEvent("ThreadedSSAGraphExecutorPrepare", nullptr)); std::unordered_map<OpHandleBase *, size_t> pending_ops; std::unordered_set<VarHandleBase *> pending_vars; - BlockingQueue<VarHandleBase *> ready_vars; + auto ready_vars = std::make_shared<BlockingQueue<VarHandleBase *>>(); std::unordered_set<OpHandleBase *> ready_ops; // For ops (e.g. nccl_all_reduce) that need to coordinate multiple // streams from multiple GPUs, it's faster to buffer them and schedule @@ -51,12 +51,12 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( for (auto &var_map : graph_->Get<details::GraphVars>(details::kGraphVars)) { for (auto &name_pair : var_map) { for (auto &version_pair : name_pair.second) { - InsertPendingVar(&pending_vars, &ready_vars, version_pair.get()); + InsertPendingVar(&pending_vars, ready_vars.get(), version_pair.get()); } } } for (auto &var : graph_->Get<details::GraphDepVars>(details::kGraphDepVars)) { - InsertPendingVar(&pending_vars, &ready_vars, var.get()); + InsertPendingVar(&pending_vars, ready_vars.get(), var.get()); } for (auto &op : graph_->Get<details::GraphOps>(details::kGraphOps)) { @@ -73,12 +73,12 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( FeedFetchList fetch_data(fetch_tensors.size()); InsertFetchOps(fetch_tensors, &fetch_ops, &fetch_dependencies, &pending_ops, - &pending_vars, &ready_vars, &fetch_data); + &pending_vars, ready_vars.get(), &fetch_data); auto run_all_ops = [&](std::unordered_set<OpHandleBase *> &set) { for (auto *op : set) { running_ops_++; - RunOp(&ready_vars, op); + RunOp(ready_vars, op); } set.clear(); }; @@ -87,7 +87,6 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( run_op_futures_.clear(); exception_holder_.Clear(); event.reset(nullptr); - // Step 3. Execution while (!pending_vars.empty()) { // 1. Run All Ready ops @@ -103,7 +102,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( // 2. Find ready variable bool timeout; - auto cur_ready_vars = ready_vars.PopAll(1, &timeout); + auto cur_ready_vars = ready_vars->PopAll(1, &timeout); if (timeout) { if (exception_holder_.IsCaught()) { @@ -133,7 +132,6 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( } } PADDLE_ENFORCE(ready_ops.empty()); - // Wait FetchOps.
ClearFetchOp(graph_.get(), &fetch_ops); @@ -206,7 +204,8 @@ void ThreadedSSAGraphExecutor::InsertPendingVar( } void ThreadedSSAGraphExecutor::RunOp( - BlockingQueue *ready_var_q, details::OpHandleBase *op) { + const std::shared_ptr> &ready_var_q, + details::OpHandleBase *op) { auto op_run = [ready_var_q, op, this] { try { if (VLOG_IS_ON(10)) { diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index 512f8a4ca5a9b82a395dde11722b8db44ea5ec27..dbb0b498d995a897b109bd4ef98521b2193276ed 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -51,7 +51,7 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { ~ThreadedSSAGraphExecutor() {} private: - void RunOp(BlockingQueue *ready_var_q, + void RunOp(const std::shared_ptr> &ready_var_q, details::OpHandleBase *op); private: diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index ce006b7a3fbc16f3c9149933390969b14a46b484..28231a53bad50fe9f19cfe3e73c3dc09aa3762cf 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -41,6 +41,7 @@ pass_library(conv_bn_fuse_pass inference) pass_library(seqconv_eltadd_relu_fuse_pass inference) if(WITH_MKLDNN) pass_library(mkldnn_placement_pass base) + pass_library(depthwise_conv_mkldnn_pass base) pass_library(conv_bias_mkldnn_fuse_pass inference) pass_library(conv_relu_mkldnn_fuse_pass inference) pass_library(conv_elementwise_add_mkldnn_fuse_pass inference) @@ -59,6 +60,7 @@ cc_test(graph_to_program_pass_test SRCS graph_to_program_pass_test.cc DEPS graph cc_test(test_graph_pattern_detector SRCS graph_pattern_detector_tester.cc DEPS graph_pattern_detector) cc_test(test_fc_fuse_pass SRCS fc_fuse_pass_tester.cc DEPS fc_fuse_pass framework_proto) if (WITH_MKLDNN) + cc_test(test_depthwise_conv_mkldnn_pass SRCS depthwise_conv_mkldnn_pass_tester.cc DEPS depthwise_conv_mkldnn_pass) cc_test(test_conv_relu_mkldnn_fuse_pass SRCS conv_relu_mkldnn_fuse_pass_tester.cc DEPS conv_relu_mkldnn_fuse_pass) cc_test(test_conv_elementwise_add_mkldnn_fuse_pass SRCS conv_elementwise_add_mkldnn_fuse_pass_tester.cc DEPS conv_elementwise_add_mkldnn_fuse_pass) endif () diff --git a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h b/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h index b5de0d548713772e7ad41cfb6d8b3e9460683efb..fe585bd7c41bb32ae00462e989ab4c0051fc89a8 100644 --- a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h @@ -31,7 +31,8 @@ class ConvReLUFusePass : public FusePassBase { virtual ~ConvReLUFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc index 8f4bab25ed4919881baf19a961a52aa229e06a8f..19248b4dfee1da81d18cd2effac08ba68dde80fb 100644 --- a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h" #include +#include "paddle/fluid/framework/op_proto_maker.h" namespace paddle { namespace framework { @@ -36,6 +37,8 @@ void SetOp(ProgramDesc* prog, const std::string& 
type, const std::string& name, op->SetInput("X", inputs); } op->SetOutput("Out", outputs); + op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(), + static_cast(OpRole::kForward)); } // a->OP0->b diff --git a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.cc b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..19056e18aa892dbc83dfbf7305b6ad8b6b6bc51c --- /dev/null +++ b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.cc @@ -0,0 +1,58 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h" +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" + +namespace paddle { +namespace framework { +namespace ir { + +#define GET_NODE(id, pattern) \ + PADDLE_ENFORCE(subgraph.count(pattern.RetrieveNode(#id)), \ + "pattern has no Node called %s", #id); \ + auto* id = subgraph.at(pattern.RetrieveNode(#id)); \ + PADDLE_ENFORCE_NOT_NULL(id, "subgraph has no node %s", #id); + +std::unique_ptr DepthwiseConvMKLDNNPass::ApplyImpl( + std::unique_ptr graph) const { + PADDLE_ENFORCE(graph.get()); + FusePassBase::Init("depthwise_conv_mkldnn_pass", graph.get()); + GraphPatternDetector gpd; + + auto* pattern = gpd.mutable_pattern(); + pattern->NewNode("depthwise_conv") + ->assert_is_op("depthwise_conv2d") + ->assert_op_attr("use_mkldnn", true); + + int found_depthwise_conv_mkldnn_count = 0; + auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, + Graph* g) { + VLOG(3) << "handle DepthwiseConvMKLDNN fuse"; + GET_NODE(depthwise_conv, (*pattern)); + depthwise_conv->Op()->SetType("conv2d"); + found_depthwise_conv_mkldnn_count++; + }; + + gpd(graph.get(), handler); + AddStatis(found_depthwise_conv_mkldnn_count); + return graph; +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(depthwise_conv_mkldnn_pass, + paddle::framework::ir::DepthwiseConvMKLDNNPass); diff --git a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h new file mode 100644 index 0000000000000000000000000000000000000000..8ca6a7325186401c26eb7f9375cf83b7b97cc1c9 --- /dev/null +++ b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/fluid/framework/ir/fuse_pass_base.h" + +namespace paddle { +namespace framework { +namespace ir { + +class DepthwiseConvMKLDNNPass : public FusePassBase { + public: + virtual ~DepthwiseConvMKLDNNPass() {} + + protected: + std::unique_ptr<ir::Graph> ApplyImpl( + std::unique_ptr<ir::Graph> graph) const override; +}; + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass_tester.cc b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass_tester.cc new file mode 100644 index 0000000000000000000000000000000000000000..09d0b15f46a7e50afb6aea46383013ce6a6c6118 --- /dev/null +++ b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass_tester.cc @@ -0,0 +1,123 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h" + +#include <gtest/gtest.h> + +namespace paddle { +namespace framework { +namespace ir { + +void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name, + const std::vector<std::string>& inputs, + const std::vector<std::string>& outputs, bool use_mkldnn = false) { + auto* op = prog->MutableBlock(0)->AppendOp(); + op->SetType(type); + op->SetAttr("use_mkldnn", use_mkldnn); + op->SetAttr("name", name); + op->SetInput("Input", {inputs[0]}); + op->SetInput("Filter", {inputs[1]}); + op->SetInput("Bias", {inputs[2]}); + op->SetOutput("Out", outputs); +} + +// (a, weights, bias)->depthwise conv mkldnn->b +// (b, weights2, bias2)->depthwise conv no mkldnn->c +// (c, weights3, bias3)->conv mkldnn->d +// (d, weights4, bias4)->conv no mkldnn->e +ProgramDesc BuildProgramDesc() { + ProgramDesc prog; + for (auto& v : std::vector<std::string>( + {"a", "b", "c", "d", "e", "weights", "bias", "weights2", "bias2", + "weights3", "bias3", "weights4", "bias4"})) { + auto* var = prog.MutableBlock(0)->Var(v); + var->SetType(proto::VarType::SELECTED_ROWS); + if (v == "weights" || v == "bias" || v == "weights2" || v == "bias2" || + v == "weights3" || v == "bias3" || v == "weights4" || v == "bias4") { + var->SetPersistable(true); + } + } + + // depthwise conv with MKL-DNN + SetOp(&prog, "depthwise_conv2d", "conv1", + std::vector<std::string>({"a", "weights", "bias"}), + std::vector<std::string>({"b"}), true); + // depthwise conv without MKL-DNN + SetOp(&prog, "depthwise_conv2d", "conv2", + std::vector<std::string>({"b", "weights2", "bias2"}), + std::vector<std::string>({"c"}), false); + // conv with MKL-DNN + SetOp(&prog, "conv2d", "conv3", + std::vector<std::string>({"c", "weights3", "bias3"}), + std::vector<std::string>({"d"}), true); + // conv without MKL-DNN + SetOp(&prog, "conv2d", "conv4", + std::vector<std::string>({"d", "weights4", "bias4"}), + std::vector<std::string>({"e"}), false); + + return prog; +} + +TEST(DepthwiseConvMKLDNNPass, basic) { + auto prog = BuildProgramDesc(); + + std::unique_ptr<ir::Graph> graph(new ir::Graph(prog)); + + auto pass = PassRegistry::Instance().Get("depthwise_conv_mkldnn_pass"); + + struct counters { + int mkldnn_depthwise_conv_nodes; + int other_depthwise_conv_nodes; + int mkldnn_conv_nodes; +
int other_conv_nodes; + }; + + counters before{1, 1, 1, 1}; + + graph = pass->Apply(std::move(graph)); + + // initialize counters before loop + counters after{0, 0, 0, 0}; + + for (auto* node : graph->Nodes()) { + if (node->IsOp()) { + auto* op = node->Op(); + if (op->Type() == "conv2d") { + if (boost::get(op->GetAttr("use_mkldnn"))) + after.mkldnn_conv_nodes++; + else + after.other_conv_nodes++; + } else if (op->Type() == "depthwise_conv2d") { + if (boost::get(op->GetAttr("use_mkldnn"))) + after.mkldnn_depthwise_conv_nodes++; + else + after.other_depthwise_conv_nodes++; + } + } + } + + EXPECT_EQ(after.other_depthwise_conv_nodes, + before.other_depthwise_conv_nodes); + EXPECT_EQ(after.other_conv_nodes, before.other_conv_nodes); + EXPECT_EQ(after.mkldnn_depthwise_conv_nodes, + before.mkldnn_depthwise_conv_nodes - 1); + EXPECT_EQ(after.mkldnn_conv_nodes, before.mkldnn_conv_nodes + 1); +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +USE_PASS(depthwise_conv_mkldnn_pass); diff --git a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc index 06286a109d01af638e74e06ccc83e2a5500663ea..2db7d95cae1c8c59691fd642e2462e92ed58814f 100644 --- a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/ir/fc_fuse_pass.h" #include +#include "paddle/fluid/framework/op_proto_maker.h" namespace paddle { namespace framework { @@ -32,6 +33,8 @@ void SetOp(ProgramDesc* prog, const std::string& type, op->SetInput("X", inputs); } op->SetOutput("Out", outputs); + op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(), + static_cast(OpRole::kForward)); } // a->OP0->b diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc index 265a128e95e6205159de67d87d5cb8ca1ad7d475..132159b8b272f311060a39b58919c26822bf50ee 100644 --- a/paddle/fluid/framework/ir/graph.cc +++ b/paddle/fluid/framework/ir/graph.cc @@ -23,8 +23,67 @@ limitations under the License. */ namespace paddle { namespace framework { namespace ir { +namespace { + +void CheckProgram(const ProgramDesc &program) { +#define _INT(role) static_cast(role) + + std::map visit; + for (OpDesc *op : program.Block(0).AllOps()) { + // For backward compatibility, some program doesn't have role added. + if (!op->HasAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) continue; + int role_id = + boost::get(op->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); + visit[role_id] = true; + switch (role_id) { + case _INT(OpRole::kForward): + if (visit.find(_INT(OpRole::kBackward)) != visit.end()) { + LOG(ERROR) + << "Cannot add backward operator before forward operator %s." 
+ << op->Type(); + } + break; + case _INT(OpRole::kBackward): + case _INT(OpRole::kBackward) | _INT(OpRole::kLoss): + PADDLE_ENFORCE( + visit.find(_INT(OpRole::kOptimize)) == visit.end(), + "Cannot add backward operator %s after optimize operator.", + op->Type()); + break; + case _INT(OpRole::kForward) | _INT(OpRole::kLoss): + PADDLE_ENFORCE(visit.find(_INT(OpRole::kBackward) | + _INT(OpRole::kLoss)) == visit.end(), + "Cannot add backward|loss operator before " + "forward|loss operator %s.", + op->Type()); + PADDLE_ENFORCE( + visit.find(_INT(OpRole::kOptimize)) == visit.end(), + "Cannot add forward|loss operator %s after optimize operator.", + op->Type()); + break; + case _INT(OpRole::kOptimize): + case _INT(OpRole::kOptimize) | _INT(OpRole::kLRSched): + PADDLE_ENFORCE(visit.find(_INT(OpRole::kBackward)) != visit.end(), + "Optimize operators %s must follow backward operator.", + op->Type()); + break; + case _INT(OpRole::kLRSched): + case _INT(OpRole::kDist): + case _INT(OpRole::kRPC): + case _INT(OpRole::kNotSpecified): + break; + default: + LOG(FATAL) << "Unknown operator role. Don't add new role because " + "you don't know what you are doing."; + } + } + +#undef _INT +} +} // namespace Graph::Graph(const ProgramDesc &program) : program_(program) { + CheckProgram(program_); // Make the nodes id start from 0. Node::ResetId(); auto var_nodes = InitFromProgram(program_); diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index 29b604afbfcfc2bac67e447db8cd4c671c036dbe..b20d70132256bd5df7411c46ff4eb246b1f14ba8 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -259,6 +259,15 @@ GraphPatternDetector::DetectPatterns() { return result; } +bool GraphItemCMP(const std::pair &a, + const std::pair &b) { + if (a.first != b.first) { + return a.first < b.first; + } else { + return a.second < b.second; + } +} + // TODO(Superjomn) enhance the function as it marks unique unique as duplicates // see https://github.com/PaddlePaddle/Paddle/issues/13550 void GraphPatternDetector::UniquePatterns( @@ -267,12 +276,16 @@ void GraphPatternDetector::UniquePatterns( std::vector result; std::unordered_set set; + std::hash hasher; for (auto &g : *subgraphs) { - size_t key = 0; - for (auto &item : g) { - key ^= std::hash{}(item.first); - key ^= std::hash{}(item.second); + // Sort the items in the sub-graph, and transform to a string key. 
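+ // The key used to be the XOR of the hashes of each (pattern node, graph + // node) pair. XOR is order-insensitive, so two genuinely different + // subgraphs, e.g. hypothetical matches {(p1, n1), (p2, n2)} and + // {(p1, n2), (p2, n1)}, collapsed to the same key and one of them was + // discarded as a duplicate (the issue linked in the TODO above). Hashing + // the sorted, concatenated pairs keeps the key independent of iteration + // order without that failure mode.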
+ std::vector> sorted_keys(g.begin(), g.end()); + std::sort(sorted_keys.begin(), sorted_keys.end(), GraphItemCMP); + std::stringstream ss; + for (auto &item : sorted_keys) { + ss << item.first << ":" << item.second; } + auto key = hasher(ss.str()); if (!set.count(key)) { result.emplace_back(g); set.insert(key); diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc index 1e7da9a69c7cbf8c13306656599a759515802b76..669d08c70c9b7453264806b346a6c9eb211cfd4a 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -418,7 +418,7 @@ void LoDTensor::MergeLoDTensor( PADDLE_ENFORCE_EQ(new_lod.size(), lod.size()); for (size_t j = 0; j < lod.size(); ++j) { auto &sub_lod = new_lod[j]; - auto &offset = sub_lod.back(); + size_t offset = sub_lod.back(); for (size_t k = 1; k < lod[j].size(); ++k) { sub_lod.push_back(lod[j][k] + offset); } diff --git a/paddle/fluid/framework/lod_tensor_array.h b/paddle/fluid/framework/lod_tensor_array.h index 0ad6a709008406257d6c0a220bce38bb24e188cd..36a5c3c5d601390beedaf37ceb98ee2c63ecf5a6 100644 --- a/paddle/fluid/framework/lod_tensor_array.h +++ b/paddle/fluid/framework/lod_tensor_array.h @@ -19,81 +19,7 @@ limitations under the License. */ namespace paddle { namespace framework { -// NOTE The vector can't be replaced with the class LoDTensorArray -// directly, because there are many vector used accross the project, -// and some of them are treated as LoDTensorArray. -#if !defined(PADDLE_ON_INFERENCE) - using LoDTensorArray = std::vector; -#else // !PADDLE_ON_INFERENCE - -#pragma message "LoDTensorArray is replaced with the inference one." -/* - * A LoDTensorArray which will not deallocate buffer when resized, fix the data - * diff in inference, and more performance friendly in the concurrency - * scenerios. - */ -class LoDTensorArray { - public: - LoDTensorArray() = default; - - using iterator = std::vector::iterator; - using const_iterator = std::vector::const_iterator; - - const_iterator begin() const { return array_.begin(); } - const_iterator end() const { return array_.begin() + size_; } - iterator begin() { return array_.begin(); } - iterator end() { return array_.begin() + size_; } - - void push_back(const LoDTensor& x) { - if (size_ < array_.size()) { - array_[size_++] = x; - } else { - array_.push_back(x); - ++size_; - } - } - void resize(size_t size) { - if (array_.size() < size) { - array_.resize(size); - } - size_ = size; - } - - void emplace_back() { array_.emplace_back(); } - - void emplace_back(LoDTensor&& x) { array_.emplace_back(std::move(x)); } - - LoDTensor& back() { return array_.back(); } - - size_t space() const { return array_.size(); } - - void reserve(size_t size) { - // Naive warning to tell user this array might be to large. The memory and - // buffer used by this TensorArray will not be deleted during the training - // and inference phase, so attention not to make it expand too long. 
- if (size > 800UL) { - LOG(WARNING) << "TensorArray has more than 800 items"; - } - array_.reserve(size); - } - - bool empty() const { return size_ == 0UL; } - void clear() { size_ = 0UL; } - - LoDTensor& operator[](size_t id) { return array_[id]; } - const LoDTensor& operator[](size_t id) const { return array_[id]; } - LoDTensor& at(size_t id) { return array_.at(id); } - const LoDTensor& at(size_t id) const { return array_.at(id); } - - size_t size() const { return size_; } - - private: - size_t size_{0}; - std::vector array_; -}; -#endif // !PADDLE_ON_INFERENCE - } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 9259bb740a8a2408e4dc7be21711560fdf250752..45fc36c70633204dbfadbd10757c08b009d2cc74 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -354,18 +354,18 @@ void OperatorBase::GenerateTemporaryNames() { } } -static bool VarIsTensor(const Variable* var) { - return var->IsType() || var->IsType(); +static bool VarIsTensor(const Variable& var) { + return var.IsType() || var.IsType(); } -const Tensor* GetTensorFromVar(Variable* var) { - if (var->IsType()) { - return var->GetMutable(); - } else if (var->IsType()) { - return var->GetMutable()->mutable_value(); +const Tensor* GetTensorFromVar(const Variable& var) { + if (var.IsType()) { + return static_cast(&(var.Get())); + } else if (var.IsType()) { + return &(var.Get().value()); } else { PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.", - var->Type().name()); + var.Type().name()); } } @@ -415,8 +415,7 @@ bool ExecutionContext::HasOutput(const std::string& name) const { template <> const Tensor* ExecutionContext::Input(const std::string& name) const { auto* var = InputVar(name); - return var == nullptr ? nullptr - : GetTensorFromVar(const_cast(var)); + return var == nullptr ? nullptr : GetTensorFromVar(*var); } template <> @@ -428,7 +427,7 @@ const std::vector ExecutionContext::MultiInput( std::transform(names.begin(), names.end(), std::back_inserter(res), [&](const std::string& sub_name) { auto var = scope_.FindVar(sub_name); - return var == nullptr ? nullptr : GetTensorFromVar(var); + return var == nullptr ? nullptr : GetTensorFromVar(*var); }); return res; } @@ -770,8 +769,10 @@ void OperatorWithKernel::TransferInplaceVarsBack( for (auto& var_name : inplace_vars) { VLOG(3) << "share inplace var " + var_name + " back to it's original scope"; auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name)); - auto* transformed_tensor = - GetTensorFromVar(transfer_scope.FindVar(var_name)); + auto* var = transfer_scope.FindVar(var_name); + PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr", + var_name); + auto* transformed_tensor = GetTensorFromVar(*var); original_tensor->ShareDataWith(*transformed_tensor); } } @@ -784,11 +785,11 @@ Scope* OperatorWithKernel::TryTransferData( for (auto& var_name : var_name_item.second) { auto* var = scope.FindVar(var_name); // Only tensor can be tranfer to another device. 
- if (var == nullptr || !VarIsTensor(var)) { + if (var == nullptr || !VarIsTensor(*var)) { continue; } - auto* tensor_in = GetTensorFromVar(var); + auto* tensor_in = GetTensorFromVar(*var); if (!tensor_in->IsInitialized()) { continue; } diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index a04d2834eb94c2d8df9c6e48782d10bb3254a6dd..96ad3205235b921a7cf60ed674a8350f74d18509 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -63,7 +63,7 @@ inline std::string GradVarName(const std::string& var_name) { } proto::VarType::Type GetDataTypeOfVar(const Variable* var); -const Tensor* GetTensorFromVar(Variable* var); +const Tensor* GetTensorFromVar(const Variable& var); class OperatorBase; class ExecutionContext; diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 4abde1f21e9701794154bee0c78be8c35582c34c..a45b9ec7a20ac3629d182f009b735d4d82fb5dc2 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -303,10 +303,8 @@ void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes( } ParallelExecutor::~ParallelExecutor() { - const auto dev_ctxs = - platform::DeviceContextPool::Instance().GetAllDeviceContexts(); - for (auto &dev_ctx : dev_ctxs) { - dev_ctx->Wait(); + for (auto &p : member_->places_) { + platform::DeviceContextPool::Instance().Get(p)->Wait(); } if (member_->own_local_scope_) { diff --git a/paddle/fluid/framework/tensor_test.cc b/paddle/fluid/framework/tensor_test.cc index cb2061c06a429d8e8116001a4aa4e8c46ea13428..a0a9a573603ceb6b577529101cb331adbc81337a 100644 --- a/paddle/fluid/framework/tensor_test.cc +++ b/paddle/fluid/framework/tensor_test.cc @@ -75,6 +75,19 @@ TEST(Tensor, MutableData) { platform::CPUPlace()); EXPECT_EQ(p1, p2); } + // Not sure if it's desired, but currently, Tensor type can be changed. 
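+ // The block below first requests int8_t storage and writes 1 through it, + // then requests uint8_t storage of the same size from the same tensor. + // The expectation is that the existing allocation is reused, so the byte + // written through the old pointer is still observable through the new + // type.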
+ { + framework::Tensor src_tensor; + int8_t* p1 = src_tensor.mutable_data(framework::make_ddim({1}), + platform::CPUPlace()); + EXPECT_NE(p1, nullptr); + *p1 = 1; + + uint8_t* p2 = src_tensor.mutable_data(framework::make_ddim({1}), + platform::CPUPlace()); + EXPECT_NE(p2, nullptr); + EXPECT_EQ(static_cast(p2[0]), 1); + } #ifdef PADDLE_WITH_CUDA { diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index 69bcbc0e5891f95af4de8dfd49a25648ca920ab1..ca1e01c89f07c4ffc3979a6a6c3728328e0a1819 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -153,6 +153,12 @@ void TensorCopySync(const Tensor& src, const platform::Place& dst_place, auto src_gpu_place = boost::get(src_place); auto dst_gpu_place = boost::get(dst_place); memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr); + } else if (platform::is_cuda_pinned_place(src_place) && + platform::is_gpu_place(dst_place)) { + auto src_pinned_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); + memory::Copy(dst_gpu_place, dst_ptr, src_pinned_place, src_ptr, size, + nullptr); } #endif } diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index dbbe8bcba69a1d87e21c8eae18834fb708e8b1e4..d31c8e3b7d66a0cdb2c4725783c9a24f049c666d 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -1,3 +1,6 @@ +if(WITH_TESTING) + include(test.cmake) # some generic cmake funtion for inference +endif() # analysis and tensorrt must be added before creating static library, # otherwise, there would be undefined reference to them in static library. add_subdirectory(analysis) diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt index d4d2fd4634f9e11f3f002e11e177c332ced49885..0354f9e6e9588af601210b8a71ae98c1f90d62f0 100644 --- a/paddle/fluid/inference/analysis/CMakeLists.txt +++ b/paddle/fluid/inference/analysis/CMakeLists.txt @@ -20,22 +20,17 @@ cc_test(test_node SRCS node_tester.cc DEPS analysis) cc_test(test_dot SRCS dot_tester.cc DEPS analysis) cc_binary(inference_analyzer SRCS analyzer_main.cc DEPS analysis paddle_fluid) -function (inference_analysis_test TARGET) - if(WITH_TESTING) - set(options "") - set(oneValueArgs "") - set(multiValueArgs SRCS ARGS EXTRA_DEPS) - cmake_parse_arguments(analysis_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - set(mem_opt "") - if(WITH_GPU) - set(mem_opt "--fraction_of_gpu_memory_to_use=0.5") - endif() - cc_test(${TARGET} - SRCS "${analysis_test_SRCS}" - DEPS analysis pass ${GLOB_PASS_LIB} ${analysis_test_EXTRA_DEPS} - ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model ${mem_opt} ${analysis_test_ARGS}) - set_tests_properties(${TARGET} PROPERTIES DEPENDS test_word2vec) - endif(WITH_TESTING) +function(inference_analysis_test TARGET) + if(WITH_TESTING) + set(options "") + set(oneValueArgs "") + set(multiValueArgs SRCS ARGS EXTRA_DEPS) + cmake_parse_arguments(analysis_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + inference_base_test(${TARGET} + SRCS ${analysis_test_SRCS} + DEPS analysis pass ${GLOB_PASS_LIB} ${analysis_test_EXTRA_DEPS} + ARGS --inference_model_dir=${WORD2VEC_MODEL_DIR} ${analysis_test_ARGS}) + endif() endfunction(inference_analysis_test) inference_analysis_test(test_analyzer SRCS analyzer_tester.cc EXTRA_DEPS paddle_inference_api) diff --git a/paddle/fluid/inference/analysis/analyzer.h 
b/paddle/fluid/inference/analysis/analyzer.h index 7114f5222c5904bb4422b9e67ad035b85bbb770c..3af1d572dfd81197797dd7e57d87ba12c2f3548e 100644 --- a/paddle/fluid/inference/analysis/analyzer.h +++ b/paddle/fluid/inference/analysis/analyzer.h @@ -79,6 +79,7 @@ class Analyzer : public OrderedRegistry { "conv_bn_fuse_pass", // "conv_eltwiseadd_bn_fuse_pass", // #ifdef PADDLE_WITH_MKLDNN + "depthwise_conv_mkldnn_pass", // "conv_bias_mkldnn_fuse_pass", // "conv_relu_mkldnn_fuse_pass", // "conv_elementwise_add_mkldnn_fuse_pass", // diff --git a/paddle/fluid/inference/analysis/data_flow_graph_tester.cc b/paddle/fluid/inference/analysis/data_flow_graph_tester.cc index 1682011c3d8cc9927a4b026b370671798cace625..50ce20621fb289023ecccf7bb39d98169765d5ee 100644 --- a/paddle/fluid/inference/analysis/data_flow_graph_tester.cc +++ b/paddle/fluid/inference/analysis/data_flow_graph_tester.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/inference/analysis/data_flow_graph.h" +#include "paddle/fluid/framework/op_proto_maker.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/inference/analysis/ut_helper.h" @@ -130,6 +131,8 @@ void SetOp(framework::ProgramDesc* prog, const std::string& type, op->SetType(type); op->SetInput("Xs", inputs); op->SetOutput("Xs", outputs); + op->SetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName(), + static_cast(framework::OpRole::kForward)); } TEST(DataFlowGraph, Build_IR_Graph) { diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index a55426f74f988176aeb180e48d1af8632ed3b5c7..49a9ebe3ddec1e4fd59ae1155a706859e249d25c 100644 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -17,39 +17,12 @@ if(APPLE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move") endif(APPLE) - -set(inference_deps paddle_inference_api paddle_fluid_api analysis pass ir_pass_manager naive_executor ${GLOB_PASS_LIB} - ) +set(inference_deps paddle_inference_api paddle_fluid_api analysis pass ir_pass_manager naive_executor ${GLOB_PASS_LIB}) if(WITH_GPU AND TENSORRT_FOUND) set(inference_deps ${inference_deps} paddle_inference_tensorrt_subgraph_engine analysis_predictor) endif() -function(inference_api_test TARGET_NAME) - if (WITH_TESTING) - set(options "") - set(oneValueArgs SRC) - set(multiValueArgs ARGS) - cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if (WITH_GPU) - cc_test(${TARGET_NAME} - SRCS ${inference_test_SRC} - DEPS "${inference_deps}" - ARGS --dirname=${PYTHON_TESTS_DIR}/book/ --fraction_of_gpu_memory_to_use=0.15) - else() - cc_test(${TARGET_NAME} - SRCS ${inference_test_SRC} - DEPS "${inference_deps}" - ARGS --dirname=${PYTHON_TESTS_DIR}/book/) - endif() - if(inference_test_ARGS) - set_tests_properties(${TARGET_NAME} - PROPERTIES DEPENDS "${inference_test_ARGS}") - endif() - endif(WITH_TESTING) -endfunction(inference_api_test) - cc_library(reset_tensor_array SRCS details/reset_tensor_array.cc DEPS lod_tensor scope) cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS reset_tensor_array lod_tensor scope) cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api analysis naive_executor zero_copy_tensor) @@ -59,8 +32,11 @@ cc_test(test_paddle_inference_api SRCS api_tester.cc DEPS paddle_inference_api) -inference_api_test(test_api_impl SRC api_impl_tester.cc - ARGS 
test_word2vec test_image_classification) +if(WITH_TESTING) + inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS ${inference_deps} + ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${PYTHON_TESTS_DIR}/book) + set_tests_properties(test_api_impl PROPERTIES DEPENDS test_image_classification) +endif() cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor ${inference_deps} paddle_inference_api ARGS --dirname=${PYTHON_TESTS_DIR}/book) @@ -68,8 +44,10 @@ if(WITH_GPU AND TENSORRT_FOUND) cc_library(paddle_inference_tensorrt_subgraph_engine SRCS api_tensorrt_subgraph_engine.cc DEPS paddle_inference_api analysis tensorrt_engine paddle_inference_api paddle_fluid_api tensorrt_converter zero_copy_tensor_dummy) - -inference_api_test(test_api_tensorrt_subgraph_engine SRC api_tensorrt_subgraph_engine_tester.cc ARGS test_word2vec) + if(WITH_TESTING) + inference_base_test(test_api_tensorrt_subgraph_engine SRCS api_tensorrt_subgraph_engine_tester.cc DEPS ${inference_deps} + ARGS --dirname=${WORD2VEC_MODEL_DIR}) + endif() endif() if (WITH_ANAKIN AND WITH_MKL) # only needed in CI diff --git a/paddle/fluid/inference/api/api_impl_tester.cc b/paddle/fluid/inference/api/api_impl_tester.cc index 1d4dfb8649563ab23ffeec1f79bb305fd2ebae26..5152b8670ddb206f0927c03149684af4a096df42 100644 --- a/paddle/fluid/inference/api/api_impl_tester.cc +++ b/paddle/fluid/inference/api/api_impl_tester.cc @@ -22,12 +22,14 @@ limitations under the License. */ #include "paddle/fluid/inference/tests/test_helper.h" #ifdef __clang__ -#define ACC_DIFF 4e-2 +#define ACC_DIFF 4e-3 #else -#define ACC_DIFF 1e-2 +#define ACC_DIFF 1e-3 #endif -DEFINE_string(dirname, "", "Directory of the inference model."); +DEFINE_string(word2vec_dirname, "", + "Directory of the word2vec inference model."); +DEFINE_string(book_dirname, "", "Directory of the book inference model."); namespace paddle { @@ -49,7 +51,7 @@ PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) { NativeConfig GetConfig() { NativeConfig config; - config.model_dir = FLAGS_dirname + "/word2vec.inference.model"; + config.model_dir = FLAGS_word2vec_dirname; LOG(INFO) << "dirname " << config.model_dir; config.fraction_of_gpu_memory = 0.15; #ifdef PADDLE_WITH_CUDA @@ -116,7 +118,7 @@ void MainImageClassification(bool use_gpu) { NativeConfig config = GetConfig(); config.use_gpu = use_gpu; config.model_dir = - FLAGS_dirname + "/image_classification_resnet.inference.model"; + FLAGS_book_dirname + "/image_classification_resnet.inference.model"; const bool is_combined = false; std::vector> feed_target_shapes = @@ -220,7 +222,7 @@ void MainThreadsImageClassification(bool use_gpu) { NativeConfig config = GetConfig(); config.use_gpu = use_gpu; config.model_dir = - FLAGS_dirname + "/image_classification_resnet.inference.model"; + FLAGS_book_dirname + "/image_classification_resnet.inference.model"; auto main_predictor = CreatePaddlePredictor(config); std::vector jobs(num_jobs); diff --git a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc index 702158ea3bcab854eece3ccd40724d92efcbae67..89c9a65cb06ba565f0e0cbdb9b6031c6adbcb64e 100644 --- a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc +++ b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc @@ -29,13 +29,13 @@ void CompareTensorRTWithFluid(bool enable_tensorrt) { //# 1. Create PaddlePredictor with a config. 
NativeConfig config0; - config0.model_dir = FLAGS_dirname + "word2vec.inference.model"; + config0.model_dir = FLAGS_dirname; config0.use_gpu = true; config0.fraction_of_gpu_memory = 0.3; config0.device = 0; MixedRTConfig config1; - config1.model_dir = FLAGS_dirname + "word2vec.inference.model"; + config1.model_dir = FLAGS_dirname; config1.use_gpu = true; config1.fraction_of_gpu_memory = 0.3; config1.device = 0; diff --git a/paddle/fluid/inference/api/demo_ci/run.sh b/paddle/fluid/inference/api/demo_ci/run.sh index 1ac655bdbbf7c45bfdde2c5fa8026fab2c891903..ff718077c1ba6b10fe87aac10d84f96a23ad6bba 100755 --- a/paddle/fluid/inference/api/demo_ci/run.sh +++ b/paddle/fluid/inference/api/demo_ci/run.sh @@ -62,7 +62,7 @@ for WITH_STATIC_LIB in ON OFF; do -DWITH_GPU=$TEST_GPU_CPU \ -DWITH_STATIC_LIB=$WITH_STATIC_LIB make -j - word2vec_model=${PADDLE_ROOT}'/build/python/paddle/fluid/tests/book/word2vec.inference.model' + word2vec_model=$DATA_DIR'/word2vec/word2vec.inference.model' if [ -d $word2vec_model ]; then for use_gpu in $use_gpu_list; do ./simple_on_word2vec \ diff --git a/paddle/fluid/inference/test.cmake b/paddle/fluid/inference/test.cmake new file mode 100644 index 0000000000000000000000000000000000000000..ab3a30ce6bba14a7d5ec700a159d90031e6b5dc7 --- /dev/null +++ b/paddle/fluid/inference/test.cmake @@ -0,0 +1,31 @@ +set(INFERENCE_URL "http://paddle-inference-dist.cdn.bcebos.com" CACHE STRING "inference download url") +set(INFERENCE_DEMO_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo" CACHE STRING + "A path setting inference demo download directories.") +function (inference_download install_dir url filename) + message(STATUS "Download inference test stuff from ${url}/${filename}") + execute_process(COMMAND bash -c "mkdir -p ${install_dir}") + execute_process(COMMAND bash -c "cd ${install_dir} && wget -q ${url}/${filename}") + message(STATUS "finish downloading ${filename}") +endfunction() + +function (inference_download_and_uncompress install_dir url filename) + inference_download(${install_dir} ${url} ${filename}) + execute_process(COMMAND bash -c "cd ${install_dir} && tar xzf ${filename}") +endfunction() + +set(WORD2VEC_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/word2vec") +if (NOT EXISTS ${WORD2VEC_INSTALL_DIR}) + inference_download_and_uncompress(${WORD2VEC_INSTALL_DIR} ${INFERENCE_URL} "word2vec.inference.model.tar.gz") +endif() +set(WORD2VEC_MODEL_DIR "${WORD2VEC_INSTALL_DIR}/word2vec.inference.model") + +function (inference_base_test TARGET) + set(options "") + set(oneValueArgs "") + set(multiValueArgs SRCS ARGS DEPS) + cmake_parse_arguments(base_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + if(WITH_GPU) + set(mem_opt "--fraction_of_gpu_memory_to_use=0.5") + endif() + cc_test(${TARGET} SRCS ${base_test_SRCS} DEPS ${base_test_DEPS} ARGS ${mem_opt} ${base_test_ARGS}) +endfunction() diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index b49dea6f51c10cec814ff85631bd06175b5156f8..89d271d4ac3fcaf4bdde8bcfa53c6f8d68f7e077 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -1,18 +1,4 @@ -set(INFERENCE_URL "http://paddle-inference-dist.cdn.bcebos.com") -set(INFERENCE_DEMO_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo" CACHE STRING - "A path setting inference demo download directories.") set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor) -function (inference_download install_dir url filename) - 
message(STATUS "Download inference test stuff from ${url}/${filename}") - execute_process(COMMAND bash -c "mkdir -p ${install_dir}") - execute_process(COMMAND bash -c "cd ${install_dir} && wget -q ${url}/${filename}") - message(STATUS "finish downloading ${filename}") -endfunction() - -function (inference_download_and_uncompress install_dir url filename) - inference_download(${install_dir} ${url} ${filename}) - execute_process(COMMAND bash -c "cd ${install_dir} && tar xzf ${filename}") -endfunction() function(download_model_and_data install_dir model_name data_name) if (NOT EXISTS ${install_dir}) diff --git a/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc b/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc new file mode 100644 index 0000000000000000000000000000000000000000..ed71594ba5781590f3291d56c4ba1a4443003bd5 --- /dev/null +++ b/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc @@ -0,0 +1,112 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/cudnn_helper.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using ScopedSpatialTransformerDescriptor = + platform::ScopedSpatialTransformerDescriptor; + +template +class CUDNNAffineGridOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace."); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); + auto* theta = ctx.Input("Theta"); + auto* output = ctx.Output("Output"); + const T* theta_data = theta->data(); + + int n = theta->dims()[0]; + auto size_attr = ctx.Attr>("output_shape"); + Tensor h_sizes; + int* h_size_data; + if (size_attr.size() == 0) { + auto* output_shape = ctx.Input("OutputShape"); + framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); + h_size_data = h_sizes.data(); + } else { + h_size_data = h_sizes.mutable_data({4}, platform::CPUPlace()); + h_size_data[0] = n; + h_size_data[1] = size_attr[1]; + h_size_data[2] = size_attr[2]; + h_size_data[3] = size_attr[3]; + } + + T* output_data = output->mutable_data( + {n, h_size_data[2], h_size_data[3], 2}, ctx.GetPlace()); + ScopedSpatialTransformerDescriptor st_desc; + cudnnSpatialTransformerDescriptor_t cudnn_st_desc = + st_desc.descriptor(4, h_size_data); + + PADDLE_ENFORCE(platform::dynload::cudnnSpatialTfGridGeneratorForward( + handle, cudnn_st_desc, theta_data, output_data)); + } +}; + +template +class CUDNNAffineGridGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace."); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); + auto output_grad = ctx.Input(framework::GradVarName("Output")); + auto theta_grad = 
ctx.Output(framework::GradVarName("Theta")); + + int n = output_grad->dims()[0]; + auto size_attr = ctx.Attr>("output_shape"); + Tensor h_sizes; + int* h_size_data; + if (size_attr.size() == 0) { + auto* output_shape = ctx.Input("OutputShape"); + framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); + h_size_data = h_sizes.data(); + } else { + h_size_data = h_sizes.mutable_data({4}, platform::CPUPlace()); + h_size_data[0] = n; + h_size_data[1] = size_attr[1]; + h_size_data[2] = size_attr[2]; + h_size_data[3] = size_attr[3]; + } + + ScopedSpatialTransformerDescriptor st_desc; + cudnnSpatialTransformerDescriptor_t cudnn_st_desc = + st_desc.descriptor(4, h_size_data); + + const T* output_grad_data = output_grad->data(); + T* theta_grad_data = theta_grad->mutable_data(ctx.GetPlace()); + + PADDLE_ENFORCE(platform::dynload::cudnnSpatialTfGridGeneratorBackward( + handle, cudnn_st_desc, output_grad_data, theta_grad_data)); + } +}; + +} // namespace operators +} // namespace paddle + +namespace plat = paddle::platform; +REGISTER_OP_KERNEL(affine_grid, CUDNN, plat::CUDAPlace, + paddle::operators::CUDNNAffineGridOpKernel, + paddle::operators::CUDNNAffineGridOpKernel); +REGISTER_OP_KERNEL(affine_grid_grad, CUDNN, plat::CUDAPlace, + paddle::operators::CUDNNAffineGridGradOpKernel, + paddle::operators::CUDNNAffineGridGradOpKernel); diff --git a/paddle/fluid/operators/affine_grid_op.cc b/paddle/fluid/operators/affine_grid_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..0ea28265a245c9cd1a35a79324a33f7cf208a159 --- /dev/null +++ b/paddle/fluid/operators/affine_grid_op.cc @@ -0,0 +1,233 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/affine_grid_op.h" +#include +#include "paddle/fluid/framework/op_registry.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/cudnn_helper.h" +#endif + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +struct Linspace { + framework::Tensor operator()(T start, T end, int count, + const framework::ExecutionContext& ctx) { + Tensor numbers; + T* number_data = numbers.mutable_data({count}, platform::CPUPlace()); + T slice = (end - start) / (T)(count - 1); + for (int i = 0; i < count; ++i) { + number_data[i] = start + (T)i * slice; + } + return numbers; + } +}; + +class AffineGridOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Theta"), + "Input(Theta) of AffineGridOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Output"), + "Output(Output) of AffineGridOp should not be null."); + auto theta_dims = ctx->GetInputDim("Theta"); + PADDLE_ENFORCE(theta_dims.size() == 3, + "AffineGrid's Input(Theta) should be 3-D tensor."); + + auto output_shape = ctx->Attrs().Get>("output_shape"); + if (output_shape.size() == 0) { + PADDLE_ENFORCE(ctx->HasInput("OutputShape"), + "Input(OutputShape) of AffineGridOp should not be null if " + "attr(output_shape) is not configured."); + auto output_shape_dims = ctx->GetInputDim("OutputShape"); + PADDLE_ENFORCE(output_shape_dims.size() == 1, + "AffineGrid's Input(OutputShape) should be 1-D tensor."); + } else { + PADDLE_ENFORCE(output_shape.size() == 4, + "The size of attr(output_shape) should be 4."); + } + + PADDLE_ENFORCE(theta_dims[1] == 2, "Input(theta) dims[1] should be 2."); + PADDLE_ENFORCE(theta_dims[2] == 3, "Input(theta) dims[2] should be 3."); + // N * H * W * 2 + ctx->SetOutputDim("Output", + framework::make_ddim({theta_dims[0], -1, -1, 2})); + ctx->ShareLoD("Theta", "Output"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::LibraryType library{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library = framework::LibraryType::kCUDNN; + } +#endif + auto data_type = framework::ToDataType(ctx.Input("Theta")->type()); + return framework::OpKernelType(data_type, ctx.GetPlace(), + framework::DataLayout::kAnyLayout, library); + } +}; + +class AffineGridOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput( + "Theta", + "(Tensor) A batch of affine transform parameters with shape [N, 2, 3]. " + "It is used to transform coordinate (x_0, y_0) to coordinate (x_1, " + "y_1)."); + AddInput("OutputShape", + "(Tensor) The shape of target image with format [N, C, H, W].") + .AsDispensable(); + AddOutput("Output", "(Tensor) Output Tensor with shape [N, H, W, 2]."); + AddAttr( + "use_cudnn", + "(bool, default false) Only used in cudnn kernel, need install cudnn") + .SetDefault(true); + AddAttr>( + "output_shape", + "The target output image shape with format [N, C, H, W].") + .SetDefault(std::vector()); + + AddComment(R"DOC( + It generates a grid of (x,y) coordinates using the parameters of the + affine transformation that correspond to a set of points where the input + feature map should be sampled to produce the transformed output feature map. 
+ + Given: + Theta = [[[x_11, x_12, x_13] + [x_14, x_15, x_16]] + [[x_21, x_22, x_23] + [x_24, x_25, x_26]]] + + OutputShape = [2, 3, 5, 5] + + Step 1: + + Generate relative coordinates according to OutputShape. + The values of relative coordinates are in the interval between -1 and 1. + The shape of the relative coordinates is [2, H, W] as below: + + C = [[[-1. -1. -1. -1. -1. ] + [-0.5 -0.5 -0.5 -0.5 -0.5] + [ 0. 0. 0. 0. 0. ] + [ 0.5 0.5 0.5 0.5 0.5] + [ 1. 1. 1. 1. 1. ]] + [[-1. -0.5 0. 0.5 1. ] + [-1. -0.5 0. 0.5 1. ] + [-1. -0.5 0. 0.5 1. ] + [-1. -0.5 0. 0.5 1. ] + [-1. -0.5 0. 0.5 1. ]]] + C[0] is the coordinates in height axis and C[1] is the coordinates in width axis. + + Step2: + Tanspose and reshape C to shape [H * W, 2] and append ones to last dimension. The we get: + C_ = [[-1. -1. 1. ] + [-0.5 -1. 1. ] + [ 0. -1. 1. ] + [ 0.5 -1. 1. ] + [ 1. -1. 1. ] + [-1. -0.5 1. ] + [-0.5 -0.5 1. ] + [ 0. -0.5 1. ] + [ 0.5 -0.5 1. ] + [ 1. -0.5 1. ] + [-1. 0. 1. ] + [-0.5 0. 1. ] + [ 0. 0. 1. ] + [ 0.5 0. 1. ] + [ 1. 0. 1. ] + [-1. 0.5 1. ] + [-0.5 0.5 1. ] + [ 0. 0.5 1. ] + [ 0.5 0.5 1. ] + [ 1. 0.5 1. ] + [-1. 1. 1. ] + [-0.5 1. 1. ] + [ 0. 1. 1. ] + [ 0.5 1. 1. ] + [ 1. 1. 1. ]] + Step3: + Compute output by equation $$Output[i] = C_ * Theta[i]^T$$ + )DOC"); + } +}; + +class AffineGridOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + auto theta_dims = ctx->GetInputDim("Theta"); + if (ctx->HasOutput(framework::GradVarName("Theta"))) { + ctx->SetOutputDim(framework::GradVarName("Theta"), theta_dims); + } + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kCUDNN; + } +#endif + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Theta")->type()), + ctx.GetPlace(), framework::DataLayout::kAnyLayout, library_); + } +}; + +class AffineGridGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* op = new framework::OpDesc(); + op->SetType("affine_grid_grad"); + op->SetInput("Theta", Input("Theta")); + op->SetInput("OutputShape", Input("OutputShape")); + op->SetInput(framework::GradVarName("Output"), OutputGrad("Output")); + + op->SetAttrMap(Attrs()); + + op->SetOutput(framework::GradVarName("Theta"), InputGrad("Theta")); + return std::unique_ptr(op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(affine_grid, ops::AffineGridOp, ops::AffineGridOpMaker, + ops::AffineGridGradMaker); +REGISTER_OPERATOR(affine_grid_grad, ops::AffineGridOpGrad); + +REGISTER_OP_CPU_KERNEL( + affine_grid, + ops::AffineGridOpKernel, + ops::AffineGridOpKernel); +REGISTER_OP_CPU_KERNEL( + affine_grid_grad, + ops::AffineGridGradOpKernel, + ops::AffineGridGradOpKernel); diff --git a/paddle/fluid/operators/affine_grid_op.h b/paddle/fluid/operators/affine_grid_op.h new file mode 100644 index 0000000000000000000000000000000000000000..07e26c292c3bafc4d98bd392a9e1e21a9eb383a8 --- /dev/null +++ b/paddle/fluid/operators/affine_grid_op.h @@ -0,0 +1,190 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenTensor = framework::EigenTensor; + +using Array1 = Eigen::DSizes; +using Array2 = Eigen::DSizes; +using Array3 = Eigen::DSizes; +using Array4 = Eigen::DSizes; + +/** + *Return a tensor with evenly spaced numbers over a specified interval. + */ +template +struct Linspace { + framework::Tensor operator()(T start, T end, int count, + const framework::ExecutionContext& ctx); +}; + +template +class AffineGridOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto& place = *ctx.template device_context().eigen_device(); + auto* theta = ctx.Input("Theta"); + int n = theta->dims()[0]; + + auto size_attr = ctx.Attr>("output_shape"); + int h = 0; + int w = 0; + if (size_attr.size() == 0) { + auto* output_shape = ctx.Input("OutputShape"); + Tensor h_sizes; + framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); + const int* h_size_data = h_sizes.data(); + h = h_size_data[2]; + w = h_size_data[3]; + } else { + h = size_attr[2]; + w = size_attr[3]; + } + + auto* output = ctx.Output("Output"); + output->mutable_data({n, h, w, 2}, ctx.GetPlace()); + + math::SetConstant()( + ctx.template device_context(), output, + static_cast(0)); + + Linspace linspace; + // Get indexes of height with shape [height, width, 1] + auto h_idx = linspace((T)-1, (T)1, h, ctx); + auto h_idx_t = EigenTensor::From(h_idx); + // Get indexes of width with shape [height, width, 1] + auto w_idx = linspace((T)-1, (T)1, w, ctx); + auto w_idx_t = EigenTensor::From(w_idx); + // Get constant ones tensor with shape [height, width, 1] + Tensor ones; + ones.mutable_data({h, w, 1}, ctx.GetPlace()); + auto ones_t = EigenTensor::From(ones).setConstant((T)1); + // Get grid tensor with shape [n, h, w, 3] by concatenating h_idx, w_idx and + // ones + Tensor grid; + grid.mutable_data({n, h, w, 3}, ctx.GetPlace()); + auto grid_t = EigenTensor::From(grid); + + grid_t.device(place) = w_idx_t.reshape(Array2(1, w)) + .broadcast(Array2(h, 1)) + .reshape(Array3(h, w, 1)) + .concatenate(h_idx_t.reshape(Array2(1, h)) + .broadcast(Array2(w, 1)) + .shuffle(Array2(1, 0)) + .reshape(Array3(h, w, 1)), + 2) + .eval() + .concatenate(ones_t, 2) + .reshape(Array4(1, h, w, 3)) + .broadcast(Array4(n, 1, 1, 1)); + + // output = grid * theta.T + // TODO(wanghaoshuang): Refine batched matrix multiply + auto blas = math::GetBlas(ctx); + for (int i = 0; i < n; ++i) { + Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3}); + Tensor sliced_theta = theta->Slice(i, i + 1).Resize({2, 3}); + Tensor sliced_out = output->Slice(i, i + 1).Resize({h * w, 2}); + blas.MatMul(sliced_grid, false, sliced_theta, true, T(1), &sliced_out, + T(0)); + } + 
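+ // The loop above computes, for each sample i, + // output[i] = grid[i] * theta[i]^T, + // where grid[i] is [h * w, 3] with one (x, y, 1) row per output point and + // theta[i] is the [2, 3] affine matrix; the trailing column of ones is + // what applies the translation part of the transform. +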
} +}; + +template +class AffineGridGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto& place = *ctx.template device_context().eigen_device(); + auto output_grad = ctx.Input(framework::GradVarName("Output")); + auto theta_grad = ctx.Output(framework::GradVarName("Theta")); + + int n = output_grad->dims()[0]; + auto size_attr = ctx.Attr>("output_shape"); + int h = 0; + int w = 0; + if (size_attr.size() == 0) { + auto* output_shape = ctx.Input("OutputShape"); + Tensor h_sizes; + framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); + const int* h_size_data = h_sizes.data(); + h = h_size_data[2]; + w = h_size_data[3]; + } else { + h = size_attr[2]; + w = size_attr[3]; + } + + theta_grad->mutable_data({n, 2, 3}, ctx.GetPlace()); + + math::SetConstant()( + ctx.template device_context(), theta_grad, + static_cast(0)); + + Linspace linspace; + + // Get indexes of height with shape [height, width, 1] + auto h_idx = linspace((T)-1, (T)1, h, ctx); + auto h_idx_t = EigenTensor::From(h_idx); + // Get indexes of width with shape [height, width, 1] + auto w_idx = linspace((T)-1, (T)1, w, ctx); + auto w_idx_t = EigenTensor::From(w_idx); + // Get constant ones tensor with shape [height, width, 1] + Tensor ones; + ones.mutable_data({h, w, 1}, ctx.GetPlace()); + auto ones_t = EigenTensor::From(ones).setConstant((T)1); + // Get grid tensor with shape [n, h, w, 3] by concatenating h_idx, w_idx and + // ones + Tensor grid; + grid.mutable_data({n, h, w, 3}, ctx.GetPlace()); + auto grid_t = EigenTensor::From(grid); + grid_t.device(place) = w_idx_t.reshape(Array2(1, w)) + .broadcast(Array2(h, 1)) + .reshape(Array3(h, w, 1)) + .concatenate(h_idx_t.reshape(Array2(1, h)) + .broadcast(Array2(w, 1)) + .shuffle(Array2(1, 0)) + .reshape(Array3(h, w, 1)), + 2) + .eval() + .concatenate(ones_t, 2) + .reshape(Array4(1, h, w, 3)) + .broadcast(Array4(n, 1, 1, 1)); + // output = grid * theta.T + // TODO(wanghaoshuang): Refine batched matrix multiply + auto blas = math::GetBlas(ctx); + for (int i = 0; i < n; ++i) { + Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3}); + Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize({h * w, 2}); + Tensor sliced_theta_grad = theta_grad->Slice(i, i + 1).Resize({2, 3}); + blas.MatMul(sliced_out_grad, true, sliced_grid, false, T(1), + &sliced_theta_grad, T(0)); + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/delete_var_op.cc b/paddle/fluid/operators/delete_var_op.cc index d7a9bfbc437dbf4c723b9c87ff62ec6b62c38638..89416f7ab5d07ddac5b540b9bb361f831c1ef360 100644 --- a/paddle/fluid/operators/delete_var_op.cc +++ b/paddle/fluid/operators/delete_var_op.cc @@ -32,6 +32,11 @@ class DeleteVarOp : public framework::OperatorBase { } }; +class DeleteVarOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override {} +}; + class DeleteVarOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { @@ -48,4 +53,5 @@ It should not be configured by users directly. 
REGISTER_OPERATOR(delete_var, paddle::operators::DeleteVarOp, paddle::framework::EmptyGradOpMaker, - paddle::operators::DeleteVarOpInfoMaker); + paddle::operators::DeleteVarOpInfoMaker, + paddle::operators::DeleteVarOpShapeInference); diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc index 089b541a0a61adb5efda6b2e027c913d5808dff0..f84ff206fffddef1030b7ed439e887bdfef342a6 100644 --- a/paddle/fluid/operators/gather_op.cc +++ b/paddle/fluid/operators/gather_op.cc @@ -102,7 +102,9 @@ REGISTER_OPERATOR(gather, ops::GatherOp, ops::GatherOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(gather_grad, ops::GatherGradOp); REGISTER_OP_CPU_KERNEL(gather, ops::GatherOpKernel, - ops::GatherOpKernel, ops::GatherOpKernel); + ops::GatherOpKernel, ops::GatherOpKernel, + ops::GatherOpKernel); REGISTER_OP_CPU_KERNEL(gather_grad, ops::GatherGradientOpKernel, + ops::GatherGradientOpKernel, ops::GatherGradientOpKernel, - ops::GatherGradientOpKernel); + ops::GatherGradientOpKernel); diff --git a/paddle/fluid/operators/gather_op.cu b/paddle/fluid/operators/gather_op.cu index 7e014dd1cb47ee0575308dc13ba7bc7617baebff..9f4aef08cd58e72ce344a640e6564b9e360ce169 100644 --- a/paddle/fluid/operators/gather_op.cu +++ b/paddle/fluid/operators/gather_op.cu @@ -61,5 +61,11 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(gather, ops::GatherOpCUDAKernel); -REGISTER_OP_CUDA_KERNEL(gather_grad, ops::GatherGradOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(gather, ops::GatherOpCUDAKernel, + ops::GatherOpCUDAKernel, + ops::GatherOpCUDAKernel, + ops::GatherOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(gather_grad, ops::GatherGradOpCUDAKernel, + ops::GatherGradOpCUDAKernel, + ops::GatherGradOpCUDAKernel, + ops::GatherGradOpCUDAKernel); diff --git a/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc b/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc new file mode 100644 index 0000000000000000000000000000000000000000..7cde7ca462fda9ae6ace7755af0a432afee28bba --- /dev/null +++ b/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc @@ -0,0 +1,132 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/cudnn_helper.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; +using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; +using DataLayout = platform::DataLayout; +using ScopedSpatialTransformerDescriptor = + platform::ScopedSpatialTransformerDescriptor; +template +using CudnnDataType = platform::CudnnDataType; + +template +class CUDNNGridSampleOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace"); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); + auto* input = ctx.Input("X"); + auto* grid = ctx.Input("Grid"); + auto* output = ctx.Output("Output"); + + int n = input->dims()[0]; + int c = input->dims()[1]; + int h = input->dims()[2]; + int w = input->dims()[3]; + const int size[4] = {n, c, h, w}; + + const T* input_data = input->data(); + const T* grid_data = grid->data(); + T* output_data = output->mutable_data({n, c, h, w}, ctx.GetPlace()); + + ScopedSpatialTransformerDescriptor st_desc; + cudnnSpatialTransformerDescriptor_t cudnn_st_desc = + st_desc.descriptor(4, size); + + ScopedTensorDescriptor input_desc; + ScopedTensorDescriptor output_desc; + cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(input->dims())); + cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(output->dims())); + + CUDNN_ENFORCE(platform::dynload::cudnnSpatialTfSamplerForward( + handle, cudnn_st_desc, CudnnDataType::kOne(), cudnn_input_desc, + input_data, grid_data, CudnnDataType::kZero(), cudnn_output_desc, + output_data)); + } +}; + +template +class CUDNNGridSampleGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace"); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); + auto* input = ctx.Input("X"); + auto* grid = ctx.Input("Grid"); + auto* output_grad = ctx.Input(framework::GradVarName("Output")); + auto* input_grad = ctx.Output(framework::GradVarName("X")); + auto* grid_grad = ctx.Output(framework::GradVarName("Grid")); + + auto output_grad_dims = output_grad->dims(); + const int n = output_grad_dims[0]; + const int c = output_grad_dims[1]; + const int h = output_grad_dims[2]; + const int w = output_grad_dims[3]; + const int size[4] = {n, c, h, w}; + + ScopedSpatialTransformerDescriptor st_dest; + cudnnSpatialTransformerDescriptor_t cudnn_st_dest = + st_dest.descriptor(4, size); + + const T* input_data = input->data(); + const T* grid_data = grid->data(); + const T* output_grad_data = output_grad->data(); + T* input_grad_data = + input_grad->mutable_data(output_grad_dims, ctx.GetPlace()); + T* grid_grad_data = + grid_grad->mutable_data({n, h, w, 2}, ctx.GetPlace()); + + ScopedTensorDescriptor input_desc; + ScopedTensorDescriptor input_grad_desc; + ScopedTensorDescriptor output_grad_desc; + cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(input->dims())); + cudnnTensorDescriptor_t cudnn_input_grad_desc = + input_grad_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(input_grad->dims())); + cudnnTensorDescriptor_t 
cudnn_output_grad_desc = + output_grad_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(output_grad->dims())); + + CUDNN_ENFORCE(platform::dynload::cudnnSpatialTfSamplerBackward( + handle, cudnn_st_dest, CudnnDataType::kOne(), cudnn_input_desc, + input_data, CudnnDataType::kZero(), cudnn_input_grad_desc, + input_grad_data, CudnnDataType::kOne(), cudnn_output_grad_desc, + output_grad_data, grid_data, CudnnDataType::kZero(), + grid_grad_data)); + } +}; + +} // namespace operators +} // namespace paddle + +namespace plat = paddle::platform; +REGISTER_OP_KERNEL(grid_sampler, CUDNN, plat::CUDAPlace, + paddle::operators::CUDNNGridSampleOpKernel, + paddle::operators::CUDNNGridSampleOpKernel); +REGISTER_OP_KERNEL(grid_sampler_grad, CUDNN, plat::CUDAPlace, + paddle::operators::CUDNNGridSampleGradOpKernel, + paddle::operators::CUDNNGridSampleGradOpKernel); diff --git a/paddle/fluid/operators/grid_sampler_op.cc b/paddle/fluid/operators/grid_sampler_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..e76eb6893b1f7b6a965682368860c02fa32f6330 --- /dev/null +++ b/paddle/fluid/operators/grid_sampler_op.cc @@ -0,0 +1,203 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/grid_sampler_op.h" +#include "paddle/fluid/framework/op_registry.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/cudnn_helper.h" +#endif + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +class GridSampleOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of GridSampleOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Grid"), + "Input(Grid) of GridSampleOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Output"), + "Output(Output) of GridSampleOp should not be null."); + + auto x_dims = ctx->GetInputDim("X"); + auto grid_dims = ctx->GetInputDim("Grid"); + PADDLE_ENFORCE(x_dims.size() == 4, + "Input(X) of GridSampleOp should be 4-D Tensor."); + PADDLE_ENFORCE(grid_dims.size() == 4, + "Input(Grid) of GridSampleOp should be 4-D Tensor."); + PADDLE_ENFORCE(grid_dims[3] == 2, "Input(Grid) dims[3] should be 2."); + PADDLE_ENFORCE_EQ(grid_dims[0], x_dims[0], + "Input(X) and Input(Grid) dims[0] should be equal."); + PADDLE_ENFORCE_EQ( + grid_dims[1], x_dims[2], + "Input(X) dims[2] and Input(Grid) dims[1] should be equal."); + PADDLE_ENFORCE_EQ( + grid_dims[2], x_dims[3], + "Input(X) dims[3] and Input(Grid) dims[2] should be equal."); + + ctx->SetOutputDim("Output", x_dims); + ctx->ShareLoD("X", "Output"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library_ = 
framework::LibraryType::kCUDNN; + } +#endif + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + framework::DataLayout::kAnyLayout, library_); + } +}; + +class GridSampleOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "(Tensor) The input data of GridSampleOp. " + "This is a 4-D tensor with shape [N, C, H, W]"); + AddInput( + "Grid", + "(Tensor) The input grid of GridSampleOp generated by AffineGridOp. " + "This is a 4-D tensor with shape [N, H, W, 2]; it is the concatenation " + "of x and y coordinates with shape [N, H, W] in the last dimension"); + AddOutput("Output", "(Tensor) Output tensor with shape [N, C, H, W]"); + AddAttr( + "use_cudnn", + "(bool, default true) Only used in the cudnn kernel; cuDNN must be installed") + .SetDefault(true); + + AddComment(R"DOC( + This operation samples input X by using bilinear interpolation based on the + flow field grid, which is usually generated by affine_grid. The grid of + shape [N, H, W, 2] is the concatenation of (grid_x, grid_y) coordinates + with shape [N, H, W] each, where grid_x indexes the 4th dimension + (width dimension) of input data x and grid_y indexes the 3rd + dimension (height dimension); the final result is the bilinear + interpolation of the values at the 4 nearest corner points. + + Step 1: + Get (x, y) grid coordinates and scale them to [0, W-1] and [0, H-1]. + + grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1) + grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1) + + Step 2: + Index input data X with the grid (x, y) in each [H, W] area, and bilinearly + interpolate the point value from the 4 nearest corner points. + + wn ------- y_n ------- en + | | | + | d_n | + | | | + x_w --d_w-- grid--d_e-- x_e + | | | + | d_s | + | | | + ws ------- y_s ------- es + + x_w = floor(x) // west side x coord + x_e = x_w + 1 // east side x coord + y_n = floor(y) // north side y coord + y_s = y_n + 1 // south side y coord + + d_w = grid_x - x_w // distance to west side + d_e = x_e - grid_x // distance to east side + d_n = grid_y - y_n // distance to north side + d_s = y_s - grid_y // distance to south side + + wn = X[:, :, y_n, x_w] // north-west point value + en = X[:, :, y_n, x_e] // north-east point value + ws = X[:, :, y_s, x_w] // south-west point value + es = X[:, :, y_s, x_e] // south-east point value + + output = wn * d_e * d_s + en * d_w * d_s + + ws * d_e * d_n + es * d_w * d_n + )DOC"); + } +}; + +class GridSampleOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + auto input_dims = ctx->GetInputDim("X"); + auto grid_dims = ctx->GetInputDim("Grid"); + if (ctx->HasOutput(framework::GradVarName("X"))) { + ctx->SetOutputDim(framework::GradVarName("X"), input_dims); + } + if (ctx->HasOutput(framework::GradVarName("Grid"))) { + ctx->SetOutputDim(framework::GradVarName("Grid"), grid_dims); + } + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kCUDNN; + } +#endif + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + framework::DataLayout::kAnyLayout, library_); + } +}; + +class GridSampleGradMaker : public framework::SingleGradOpDescMaker { + public: + using
framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* op = new framework::OpDesc(); + op->SetType("grid_sampler_grad"); + op->SetInput("X", Input("X")); + op->SetInput("Grid", Input("Grid")); + op->SetInput(framework::GradVarName("Output"), OutputGrad("Output")); + + op->SetAttrMap(Attrs()); + + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetOutput(framework::GradVarName("Grid"), InputGrad("Grid")); + return std::unique_ptr(op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(grid_sampler, ops::GridSampleOp, ops::GridSampleOpMaker, + ops::GridSampleGradMaker); +REGISTER_OPERATOR(grid_sampler_grad, ops::GridSampleOpGrad); + +REGISTER_OP_CPU_KERNEL( + grid_sampler, + ops::GridSampleOpKernel, + ops::GridSampleOpKernel); +REGISTER_OP_CPU_KERNEL( + grid_sampler_grad, + ops::GridSampleGradOpKernel, + ops::GridSampleGradOpKernel); diff --git a/paddle/fluid/operators/grid_sampler_op.h b/paddle/fluid/operators/grid_sampler_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0d5874fc0cc4b90bec141690b88f28a27443bd60 --- /dev/null +++ b/paddle/fluid/operators/grid_sampler_op.h @@ -0,0 +1,322 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
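The DOC string of GridSampleOpMaker above walks through the interpolation informally; the following self-contained sketch (editorial, not part of the patch; it assumes plain row-major arrays and uses hypothetical names) spells out the same math for one output location.

```cpp
// Editorial sketch of the bilinear sampling described in the GridSampleOp
// comment above; names and the plain-array layout are illustrative only.
#include <cmath>

// x: input feature map of shape [C, H, W]; gx, gy: grid coords in [-1, 1].
// Returns the sampled value for channel c.
float BilinearSample(const float* x, int C, int H, int W, int c, float gx,
                     float gy) {
  // Step 1: scale grid coords from [-1, 1] to [0, W-1] / [0, H-1].
  float grid_x = 0.5f * (gx + 1.f) * (W - 1);
  float grid_y = 0.5f * (gy + 1.f) * (H - 1);
  // Step 2: corner coordinates and distances to the four sides.
  int x_w = static_cast<int>(std::floor(grid_x)), x_e = x_w + 1;
  int y_n = static_cast<int>(std::floor(grid_y)), y_s = y_n + 1;
  float d_w = grid_x - x_w, d_e = x_e - grid_x;
  float d_n = grid_y - y_n, d_s = y_s - grid_y;
  // Out-of-bound corners contribute zero, similar to the CPU kernel's
  // isInBound handling.
  auto at = [&](int yy, int xx) -> float {
    if (xx < 0 || xx > W - 1 || yy < 0 || yy > H - 1) return 0.f;
    return x[(c * H + yy) * W + xx];
  };
  // Weighted sum of the four nearest corners.
  return at(y_n, x_w) * d_e * d_s + at(y_n, x_e) * d_w * d_s +
         at(y_s, x_w) * d_e * d_n + at(y_s, x_e) * d_w * d_n;
}
```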
*/ + +#pragma once +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/gather.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/hostdevice.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenTensor = framework::EigenTensor; + +using Array3 = Eigen::DSizes; +using Array4 = Eigen::DSizes; + +template +static inline bool isInBound(T x, T y, T x_max, T y_max) { + if (x < 0 || x > x_max || y < 0 || y > y_max) { + return false; + } + return true; +} + +template +static void CalcGridLocations(const platform::CPUDeviceContext& ctx, + const Tensor& grid, Tensor* x_w, Tensor* x_e, + Tensor* y_n, Tensor* y_s, Tensor* d_w, + Tensor* d_e, Tensor* d_n, Tensor* d_s) { + auto& place = *ctx.eigen_device(); + const int n = grid.dims()[0]; + const int h = grid.dims()[1]; + const int w = grid.dims()[2]; + const T x_max = static_cast(w - 1); + const T y_max = static_cast(h - 1); + + // split grid with shape (n, h, w, 2) into (x, y) by the 3rd Dim + Tensor grid_x, grid_y; + T* grid_x_data = grid_x.mutable_data({n, h, w}, ctx.GetPlace()); + T* grid_y_data = grid_y.mutable_data({n, h, w}, ctx.GetPlace()); + const T* grid_data = grid.data(); + for (int i = 0; i < n * h * w; i++) { + grid_x_data[i] = grid_data[2 * i]; + grid_y_data[i] = grid_data[(2 * i) + 1]; + } + + Tensor ones; + ones.mutable_data({n, h, w}, ctx.GetPlace()); + auto ones_t = EigenTensor::From(ones).setConstant(1.0); + + // scale grid to [0, h-1/w-1] + auto grid_x_t = EigenTensor::From(grid_x); + auto grid_y_t = EigenTensor::From(grid_y); + grid_x_t.device(place) = 0.5 * ((grid_x_t + ones_t) * x_max); + grid_y_t.device(place) = 0.5 * ((grid_y_t + ones_t) * y_max); + + // calculate coords of 4 corner points + x_w->mutable_data({n, h, w}, ctx.GetPlace()); + x_e->mutable_data({n, h, w}, ctx.GetPlace()); + y_n->mutable_data({n, h, w}, ctx.GetPlace()); + y_s->mutable_data({n, h, w}, ctx.GetPlace()); + auto x_w_t = EigenTensor::From(*x_w); + auto x_e_t = EigenTensor::From(*x_e); + auto y_n_t = EigenTensor::From(*y_n); + auto y_s_t = EigenTensor::From(*y_s); + x_w_t.device(place) = grid_x_t.floor(); + x_e_t.device(place) = x_w_t + ones_t; + y_n_t.device(place) = grid_y_t.floor(); + y_s_t.device(place) = y_n_t + ones_t; + + // calculate distances to 4 sides + d_w->mutable_data({n, h, w}, ctx.GetPlace()); + d_e->mutable_data({n, h, w}, ctx.GetPlace()); + d_n->mutable_data({n, h, w}, ctx.GetPlace()); + d_s->mutable_data({n, h, w}, ctx.GetPlace()); + auto d_w_t = EigenTensor::From(*d_w); + auto d_e_t = EigenTensor::From(*d_e); + auto d_n_t = EigenTensor::From(*d_n); + auto d_s_t = EigenTensor::From(*d_s); + d_w_t.device(place) = grid_x_t - x_w_t; + d_e_t.device(place) = x_e_t - grid_x_t; + d_n_t.device(place) = grid_y_t - y_n_t; + d_s_t.device(place) = y_s_t - grid_y_t; +} + +template +static void GetGridPointValue(const Tensor& input, Tensor* output, + const Tensor& x, const Tensor& y) { + const int n = input.dims()[0]; + const int c = input.dims()[1]; + const int h = input.dims()[2]; + const int w = input.dims()[3]; + auto x_t = EigenTensor::From(x); + auto y_t = EigenTensor::From(y); + auto output_t = EigenTensor::From(*output).setConstant((T)0); + auto input_t = EigenTensor::From(input); + + for (int i = 0; i < n; i++) { + for (int k = 0; k < h; k++) { + for (int l = 0; l < w; l++) { + if (isInBound(x_t(i, k, l), y_t(i, k, l), (T)(w - 1), (T)(h - 1))) { + for (int j = 0; j < c; j++) { + 
output_t(i, j, k, l) = + input_t(i, j, static_cast(round(y_t(i, k, l))), + static_cast(round(x_t(i, k, l)))); + } + } + } + } + } +} + +template +static void GatherOutputGradToInputGrad(const Tensor& output_grad, + Tensor* input_grad, const Tensor& x, + const Tensor& y, const Tensor& d1, + const Tensor& d2) { + const int n = output_grad.dims()[0]; + const int c = output_grad.dims()[1]; + const int h = output_grad.dims()[2]; + const int w = output_grad.dims()[3]; + auto x_t = EigenTensor::From(x); + auto y_t = EigenTensor::From(y); + auto d1_t = EigenTensor::From(d1); + auto d2_t = EigenTensor::From(d2); + auto input_grad_t = EigenTensor::From(*input_grad); + auto output_grad_t = EigenTensor::From(output_grad); + + for (int i = 0; i < n; i++) { + for (int k = 0; k < h; k++) { + for (int l = 0; l < w; l++) { + if (isInBound(x_t(i, k, l), y_t(i, k, l), (T)(w - 1), (T)(h - 1))) { + for (int j = 0; j < c; j++) { + input_grad_t(i, j, static_cast(round(y_t(i, k, l))), + static_cast(round(x_t(i, k, l)))) += + output_grad_t(i, j, k, l) * d1_t(i, k, l) * d2_t(i, k, l); + } + } + } + } + } +} + +template +class GridSampleOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto& place = *ctx.template device_context().eigen_device(); + auto* input = ctx.Input("X"); + auto* grid = ctx.Input("Grid"); + + const int n = input->dims()[0]; + const int c = input->dims()[1]; + const int h = input->dims()[2]; + const int w = input->dims()[3]; + + // calc locations and distances of 4 corner points + Tensor x_w, x_e, y_n, y_s; + Tensor d_w, d_e, d_n, d_s; + CalcGridLocations( + ctx.template device_context(), *grid, &x_w, + &x_e, &y_n, &y_s, &d_w, &d_e, &d_n, &d_s); + + auto* output = ctx.Output("Output"); + output->mutable_data({n, c, h, w}, ctx.GetPlace()); + math::SetConstant()( + ctx.template device_context(), output, + static_cast(0)); + + // calc 4 corner points value + Tensor v_wn, v_en, v_ws, v_es; + v_wn.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_en.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_ws.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_es.mutable_data({n, c, h, w}, ctx.GetPlace()); + GetGridPointValue(*input, &v_wn, x_w, y_n); + GetGridPointValue(*input, &v_en, x_e, y_n); + GetGridPointValue(*input, &v_ws, x_w, y_s); + GetGridPointValue(*input, &v_es, x_e, y_s); + + auto d_w_t = EigenTensor::From(d_w); + auto d_e_t = EigenTensor::From(d_e); + auto d_n_t = EigenTensor::From(d_n); + auto d_s_t = EigenTensor::From(d_s); + auto d_w_scaled_t = + d_w_t.reshape(Array4(n, 1, h, w)).broadcast(Array4(1, c, 1, 1)); + auto d_e_scaled_t = + d_e_t.reshape(Array4(n, 1, h, w)).broadcast(Array4(1, c, 1, 1)); + auto d_n_scaled_t = + d_n_t.reshape(Array4(n, 1, h, w)).broadcast(Array4(1, c, 1, 1)); + auto d_s_scaled_t = + d_s_t.reshape(Array4(n, 1, h, w)).broadcast(Array4(1, c, 1, 1)); + auto v_wn_t = EigenTensor::From(v_wn); + auto v_en_t = EigenTensor::From(v_en); + auto v_ws_t = EigenTensor::From(v_ws); + auto v_es_t = EigenTensor::From(v_es); + auto output_t = EigenTensor::From(*output); + // bilinear interpolaetion by 4 corner points + output_t.device(place) = v_wn_t * d_e_scaled_t * d_s_scaled_t + + v_en_t * d_w_scaled_t * d_s_scaled_t + + v_ws_t * d_e_scaled_t * d_n_scaled_t + + v_es_t * d_w_scaled_t * d_n_scaled_t; + } +}; + +template +class GridSampleGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("X"); + auto* grid = 
ctx.Input("Grid"); + auto* output_grad = ctx.Input(framework::GradVarName("Output")); + + const int n = input->dims()[0]; + const int c = input->dims()[1]; + const int h = input->dims()[2]; + const int w = input->dims()[3]; + + auto* input_grad = ctx.Output(framework::GradVarName("X")); + input_grad->mutable_data({n, c, h, w}, ctx.GetPlace()); + math::SetConstant()( + ctx.template device_context(), input_grad, + static_cast(0)); + auto* grid_grad = ctx.Output(framework::GradVarName("Grid")); + grid_grad->mutable_data({n, h, w, 2}, ctx.GetPlace()); + math::SetConstant()( + ctx.template device_context(), grid_grad, + static_cast(0)); + + Tensor x_w, x_e, y_n, y_s; + Tensor d_w, d_e, d_n, d_s; + CalcGridLocations( + ctx.template device_context(), *grid, &x_w, + &x_e, &y_n, &y_s, &d_w, &d_e, &d_n, &d_s); + + // gather output grad value to input grad by corner point coords and weight + GatherOutputGradToInputGrad(*output_grad, input_grad, x_w, y_n, d_e, + d_s); + GatherOutputGradToInputGrad(*output_grad, input_grad, x_w, y_s, d_e, + d_n); + GatherOutputGradToInputGrad(*output_grad, input_grad, x_e, y_n, d_w, + d_s); + GatherOutputGradToInputGrad(*output_grad, input_grad, x_e, y_s, d_w, + d_n); + + // calc 4 corner points value + Tensor v_wn, v_en, v_ws, v_es; + v_wn.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_en.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_ws.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_es.mutable_data({n, c, h, w}, ctx.GetPlace()); + GetGridPointValue(*input, &v_wn, x_w, y_n); + GetGridPointValue(*input, &v_en, x_e, y_n); + GetGridPointValue(*input, &v_ws, x_w, y_s); + GetGridPointValue(*input, &v_es, x_e, y_s); + auto v_wn_t = EigenTensor::From(v_wn); + auto v_en_t = EigenTensor::From(v_en); + auto v_ws_t = EigenTensor::From(v_ws); + auto v_es_t = EigenTensor::From(v_es); + + auto d_w_t = EigenTensor::From(d_w); + auto d_e_t = EigenTensor::From(d_e); + auto d_n_t = EigenTensor::From(d_n); + auto d_s_t = EigenTensor::From(d_s); + + auto output_grad_t = EigenTensor::From(*output_grad); + + Tensor grid_grad_x, grid_grad_y; + grid_grad_x.mutable_data({n, h, w}, ctx.GetPlace()); + grid_grad_y.mutable_data({n, h, w}, ctx.GetPlace()); + auto grid_grad_x_t = EigenTensor::From(grid_grad_x).setConstant(0.0); + auto grid_grad_y_t = EigenTensor::From(grid_grad_y).setConstant(0.0); + for (int i = 0; i < n; i++) { + for (int j = 0; j < c; j++) { + for (int k = 0; k < h; k++) { + for (int l = 0; l < w; l++) { + grid_grad_x_t(i, k, l) += + ((v_en_t(i, j, k, l) - v_wn_t(i, j, k, l)) * d_s_t(i, k, l) + + (v_es_t(i, j, k, l) - v_ws_t(i, j, k, l)) * d_n_t(i, k, l)) * + output_grad_t(i, j, k, l); + grid_grad_y_t(i, k, l) += + ((v_ws_t(i, j, k, l) - v_wn_t(i, j, k, l)) * d_e_t(i, k, l) + + (v_es_t(i, j, k, l) - v_en_t(i, j, k, l)) * d_w_t(i, k, l)) * + output_grad_t(i, j, k, l); + } + } + } + } + const T x_max = static_cast(w - 1); + const T y_max = static_cast(h - 1); + grid_grad_x_t = grid_grad_x_t * (x_max / (T)2); + grid_grad_y_t = grid_grad_y_t * (y_max / (T)2); + + // gather grid_grad [x, y] in 3rd Dim + T* grid_grad_data = grid_grad->data(); + T* grid_grad_x_data = grid_grad_x.data(); + T* grid_grad_y_data = grid_grad_y.data(); + for (int i = 0; i < n * h * w; i++) { + grid_grad_data[2 * i] = grid_grad_x_data[i]; + grid_grad_data[2 * i + 1] = grid_grad_y_data[i]; + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index 
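For reference, the closing multiplication by x_max/2 and y_max/2 in the gradient kernel above is the chain-rule factor from Step 1 of the forward pass; a short editorial derivation:

```latex
% Editorial note: gradient of the bilinear output w.r.t. the grid, matching
% the accumulation loop in GridSampleGradOpKernel above.
% Forward: out = v_{wn} d_e d_s + v_{en} d_w d_s + v_{ws} d_e d_n + v_{es} d_w d_n,
% with d_e = 1 - d_w and d_s = 1 - d_n.
\frac{\partial\,\mathrm{out}}{\partial\,\mathrm{grid}_x}
  = (v_{en} - v_{wn})\, d_s + (v_{es} - v_{ws})\, d_n,
\qquad
\frac{\partial\,\mathrm{out}}{\partial\,\mathrm{grid}_y}
  = (v_{ws} - v_{wn})\, d_e + (v_{es} - v_{en})\, d_w,
\qquad
\frac{\partial\,\mathrm{grid}_x}{\partial\,g_x} = \frac{W-1}{2},
\quad
\frac{\partial\,\mathrm{grid}_y}{\partial\,g_y} = \frac{H-1}{2}.
```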
17b675fba8067851f6149edafcc9096690a3fd34..c1d4cc1b889700079091f859dbbdb46f626dbb0f 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -76,6 +76,6 @@ endif() cc_test(concat_test SRCS concat_test.cc DEPS concat_and_split) cc_test(cpu_vec_test SRCS cpu_vec_test.cc DEPS blas cpu_info) cc_library(jit_kernel - SRCS jit_kernel.cc jit_kernel_blas.cc jit_kernel_exp.cc jit_kernel_rnn.cc jit_kernel_crf_decode.cc - DEPS cpu_info cblas) + SRCS jit_kernel.cc jit_gen.cc jit_code.cc jit_kernel_blas.cc jit_kernel_exp.cc jit_kernel_rnn.cc jit_kernel_crf_decode.cc + DEPS cpu_info cblas gflags enforce) cc_test(jit_kernel_test SRCS jit_kernel_test.cc DEPS jit_kernel) diff --git a/paddle/fluid/operators/math/jit_code.cc b/paddle/fluid/operators/math/jit_code.cc new file mode 100644 index 0000000000000000000000000000000000000000..06cf82513dfd0a0b3dc139602c59e953983c101a --- /dev/null +++ b/paddle/fluid/operators/math/jit_code.cc @@ -0,0 +1,53 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/math/jit_code.h" +#include "paddle/fluid/operators/math/jit_kernel.h" +#include "paddle/fluid/platform/cpu_info.h" + +namespace paddle { +namespace operators { +namespace math { +namespace jitkernel { +namespace gen { + +using namespace platform::jit; // NOLINT + +bool VMulJitCode::init(int d) { + // TODO(TJ): maybe AVX support alone is enough; ISAs above AVX may lower the CPU frequency. + // Try more with avx2 or avx512. + if (MayIUse(avx) || MayIUse(avx2)) { + return d % AVX_FLOAT_BLOCK == 0; + } else { + return false; + } +} + +void VMulJitCode::generate() { + // No need to push the stack here, and no need to save AVX-512 registers if AVX-512 is not used. + int stride = sizeof(float) * AVX_FLOAT_BLOCK; + for (int i = 0; i < num_ / AVX_FLOAT_BLOCK; ++i) { + vmovups(ymm_src1, ptr[param1 + i * stride]); + vmovups(ymm_src2, ptr[param2 + i * stride]); + vmulps(ymm_dst, ymm_src1, ymm_src2); + vmovups(ptr[param3 + stride * i], ymm_dst); + } + ret(); +} + +} // namespace gen +} // namespace jitkernel +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/jit_code.h b/paddle/fluid/operators/math/jit_code.h new file mode 100644 index 0000000000000000000000000000000000000000..db1a0cd0958d7047b83d1dd234eaf9f0fb748fa6 --- /dev/null +++ b/paddle/fluid/operators/math/jit_code.h @@ -0,0 +1,63 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
*/ + +#pragma once + +#include "paddle/fluid/operators/math/jit_gen.h" + +namespace paddle { +namespace operators { +namespace math { +namespace jitkernel { +namespace gen { + +using reg64_t = const Xbyak::Reg64; +using reg32_t = const Xbyak::Reg32; +using xmm_t = const Xbyak::Xmm; +using ymm_t = const Xbyak::Ymm; +using zmm_t = const Xbyak::Zmm; +using Label = Xbyak::Label; + +class VMulJitCode : public JitCode { + public: + DECLARE_JIT_CODE(VMulJitCode); + explicit VMulJitCode(int d, size_t code_size = 256 * 1024, + void* code_ptr = nullptr) + : JitCode(code_size, code_ptr), num_(d) {} + static bool init(int d); + void generate() override; + + private: + int num_; + reg64_t param1{abi_param1}; + reg64_t param2{abi_param2}; + reg64_t param3{abi_param3}; + + xmm_t xmm_src1 = xmm_t(0); + ymm_t ymm_src1 = ymm_t(0); + zmm_t zmm_src1 = zmm_t(0); + xmm_t xmm_src2 = xmm_t(1); + ymm_t ymm_src2 = ymm_t(1); + zmm_t zmm_src2 = zmm_t(1); + + xmm_t xmm_dst = xmm_t(2); + ymm_t ymm_dst = ymm_t(2); + zmm_t zmm_dst = zmm_t(2); +}; + +} // namespace gen +} // namespace jitkernel +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/jit_gen.cc b/paddle/fluid/operators/math/jit_gen.cc new file mode 100644 index 0000000000000000000000000000000000000000..6af39518ed926554c8c839bba701d3827923dba0 --- /dev/null +++ b/paddle/fluid/operators/math/jit_gen.cc @@ -0,0 +1,90 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/math/jit_gen.h" +#include +#include +#include +#include "paddle/fluid/platform/cpu_info.h" + +DEFINE_bool(dump_jitcode, false, "Whether to dump the jitcode to file"); + +namespace paddle { +namespace operators { +namespace math { +namespace jitkernel { +namespace gen { + +constexpr Xbyak::Operand::Code g_abi_regs[] = { + Xbyak::Operand::RBX, Xbyak::Operand::RBP, Xbyak::Operand::R12, + Xbyak::Operand::R13, Xbyak::Operand::R14, Xbyak::Operand::R15}; + +constexpr int num_g_abi_regs = sizeof(g_abi_regs) / sizeof(g_abi_regs[0]); + +void JitCode::preCode() { + for (int i = 0; i < num_g_abi_regs; ++i) { + push(Xbyak::Reg64(g_abi_regs[i])); + } + if (platform::jit::MayIUse(platform::jit::avx512f)) { + mov(reg_EVEX_max_8b_offt, 2 * EVEX_max_8b_offt); + } +} + +void JitCode::postCode() { + for (int i = 0; i < num_g_abi_regs; ++i) { + pop(Xbyak::Reg64(g_abi_regs[num_g_abi_regs - 1 - i])); + } + ret(); +} + +void JitCode::dumpCode(const Xbyak::uint8 *code) const { + if (code) { + static int counter = 0; + std::ostringstream filename; + filename << "paddle_jitcode_" << name() << "." 
<< counter << ".bin"; + counter++; + std::ofstream fout(filename.str(), std::ios::out); + if (fout.is_open()) { + fout.write(reinterpret_cast(code), getSize()); + fout.close(); + } + } +} + +Xbyak::Address JitCode::EVEX_compress_addr(Xbyak::Reg64 base, int offt, + bool bcast) { + int scale = 0; + if (EVEX_max_8b_offt <= offt && offt < 3 * EVEX_max_8b_offt) { + offt = offt - 2 * EVEX_max_8b_offt; + scale = 1; + } else if (3 * EVEX_max_8b_offt <= offt && offt < 5 * EVEX_max_8b_offt) { + offt = offt - 4 * EVEX_max_8b_offt; + scale = 2; + } + auto re = Xbyak::RegExp() + base + offt; + if (scale) { + re = re + reg_EVEX_max_8b_offt * scale; + } + if (bcast) { + return zword_b[re]; + } else { + return zword[re]; + } +} + +} // namespace gen +} // namespace jitkernel +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/jit_gen.h b/paddle/fluid/operators/math/jit_gen.h new file mode 100644 index 0000000000000000000000000000000000000000..6abf3434cc8d8f6ab2838ef822a4f6b948331802 --- /dev/null +++ b/paddle/fluid/operators/math/jit_gen.h @@ -0,0 +1,80 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include +#include "paddle/fluid/platform/macros.h" + +#define XBYAK_USE_MMAP_ALLOCATOR +#include "xbyak/xbyak.h" +#include "xbyak/xbyak_util.h" + +DECLARE_bool(dump_jitcode); + +namespace paddle { +namespace operators { +namespace math { +namespace jitkernel { +namespace gen { + +#define DECLARE_JIT_CODE(codename) \ + const char *name() const override { return #codename; } + +// Application Binary Interface +constexpr Xbyak::Operand::Code abi_param1(Xbyak::Operand::RDI), + abi_param2(Xbyak::Operand::RSI), abi_param3(Xbyak::Operand::RDX), + abi_param4(Xbyak::Operand::RCX), abi_not_param1(Xbyak::Operand::RCX); + +class JitCode : public Xbyak::CodeGenerator { + public: + explicit JitCode(size_t code_size = 256 * 1024, void *code_ptr = nullptr) + : Xbyak::CodeGenerator(code_size, code_ptr) {} + + virtual ~JitCode() {} + virtual const char *name() const = 0; + virtual void generate() = 0; + + template + const FUNC getCode() { + this->generate(); + const Xbyak::uint8 *code = CodeGenerator::getCode(); + if (FLAGS_dump_jitcode) { + this->dumpCode(code); + } + return reinterpret_cast(code); + } + DISABLE_COPY_AND_ASSIGN(JitCode); + + protected: + Xbyak::Reg64 param1{abi_param1}; + const int EVEX_max_8b_offt = 0x200; + const Xbyak::Reg64 reg_EVEX_max_8b_offt = rbp; + + void preCode(); + void postCode(); + void dumpCode(const Xbyak::uint8 *code) const; + void L(const char *label) { Xbyak::CodeGenerator::L(label); } + void L(const Xbyak::Label &label) { Xbyak::CodeGenerator::L(label); } + // Enhanced vector extension + Xbyak::Address EVEX_compress_addr(Xbyak::Reg64 base, int offt, + bool bcast = false); +}; + +} // namespace gen +} // namespace jitkernel +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/jit_kernel.h 
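jit_gen.h above exposes the generated buffer through getCode<FUNC>(); a minimal usage sketch of how a JitCode subclass such as VMulJitCode becomes a callable follows. This is an editorial sketch, not part of the patch; the JIT function bakes the size d in at generation time, so the trailing int argument of the pointer type is only there to match the kernel interface.

```cpp
// Editorial sketch (not part of the patch): turning a JitCode subclass into
// a plain function pointer via getCode<FUNC>().
#include "paddle/fluid/operators/math/jit_code.h"

namespace gen = paddle::operators::math::jitkernel::gen;
using VMulFunc = void (*)(const float*, const float*, float*, int);

VMulFunc BuildVMul(int d) {
  // The JitCode object owns the executable buffer, so it has to outlive the
  // returned pointer; VMulKernelImpl below keeps it in a unique_ptr member.
  auto* code = new gen::VMulJitCode(d);
  // getCode() runs generate() once and, with --dump_jitcode set, writes the
  // emitted bytes to paddle_jitcode_VMulJitCode.<counter>.bin for inspection.
  return code->getCode<VMulFunc>();
}
```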
b/paddle/fluid/operators/math/jit_kernel.h index 48e180b1fd43b06cc13f7a4b00c73aff2eb940ac..7b6027aa267803ff8ff830deabda536b1b27fec8 100644 --- a/paddle/fluid/operators/math/jit_kernel.h +++ b/paddle/fluid/operators/math/jit_kernel.h @@ -39,6 +39,7 @@ class Kernel { public: Kernel() = default; virtual ~Kernel() = default; + // TODO(TJ): below members should be deprecated. int num_{0}; int end_{0}; int rest_{0}; @@ -64,7 +65,7 @@ class KernelPool { template class VMulKernel : public Kernel { public: - virtual void Compute(const T *x, const T *y, T *z) const = 0; + void (*Compute)(const T *, const T *, T *, int); }; template diff --git a/paddle/fluid/operators/math/jit_kernel_blas.cc b/paddle/fluid/operators/math/jit_kernel_blas.cc index c88b17b012d1b9cd59220f6b37cb2ecb8a1551a4..cef21348e43c4342c92bd17f4c70ff106e35d99c 100644 --- a/paddle/fluid/operators/math/jit_kernel_blas.cc +++ b/paddle/fluid/operators/math/jit_kernel_blas.cc @@ -14,7 +14,10 @@ limitations under the License. */ #include "paddle/fluid/operators/math/jit_kernel.h" #include +#include "paddle/fluid/operators/math/jit_code.h" #include "paddle/fluid/operators/math/jit_kernel_macro.h" +#include "paddle/fluid/platform/enforce.h" + #ifdef PADDLE_WITH_MKLML #include "paddle/fluid/platform/dynload/mklml.h" #endif @@ -27,65 +30,76 @@ namespace paddle { namespace operators { namespace math { namespace jitkernel { - namespace jit = platform::jit; +template +void VMulRefer(const T* x, const T* y, T* z, int n) { + for (int i = 0; i < n; ++i) { + z[i] = x[i] * y[i]; + } +} + +#ifdef PADDLE_WITH_MKLML +template +void VMulMKL(const T* x, const T* y, T* z, int n); + +template <> +void VMulMKL(const float* x, const float* y, float* z, int n) { + platform::dynload::vsMul(n, x, y, z); +} +template <> +void VMulMKL(const double* x, const double* y, double* z, int n) { + platform::dynload::vdMul(n, x, y, z); +} +#endif + /* VMUL JitKernel */ -template +template class VMulKernelImpl : public VMulKernel { public: - explicit VMulKernelImpl(int d) : VMulKernel() { this->num_ = d; } - void Compute(const T* x, const T* y, T* z) const override { - for (int i = 0; i < this->num_; ++i) { - z[i] = x[i] * y[i]; - } + static inline std::string name(int d) { + PADDLE_THROW("DType should be either float or double"); } -}; - + static inline bool useJIT(int d) { return false; } + static inline bool useMKL(int d) { return false; } + + explicit VMulKernelImpl(int d) : VMulKernel() { + if (useJIT(d)) { + constexpr size_t sz = 256 * 1024; // TODO(TJ): should be related with d + jitcode_.reset(new gen::VMulJitCode(d, sz)); + this->Compute = + jitcode_->getCode(); + return; + } #ifdef PADDLE_WITH_MKLML -#define MKL_FLOAT(isa, block) \ - template <> \ - void VMulKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - platform::dynload::vsMul(this->num_, x, y, z); \ + if (useMKL(d)) { + this->Compute = VMulMKL; + return; + } +#endif + this->Compute = VMulRefer; } -#define MKL_DOUBLE(isa, block) \ - template <> \ - void VMulKernelImpl::Compute( \ - const double* x, const double* y, double* z) const { \ - platform::dynload::vdMul(this->num_, x, y, z); \ - } + private: + std::unique_ptr jitcode_{nullptr}; +}; -FOR_EACH_ISA(MKL_FLOAT, kGT16); -FOR_EACH_ISA_BLOCK(MKL_DOUBLE); -#endif +template <> +bool VMulKernelImpl::useJIT(int d) { + return gen::VMulJitCode::init(d); +} -#define INTRI8_FLOAT(isa) \ - template <> \ - void VMulKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 tmpx, tmpy; \ - tmpx = 
_mm256_loadu_ps(x); \ - tmpy = _mm256_loadu_ps(y); \ - tmpx = _mm256_mul_ps(tmpx, tmpy); \ - _mm256_storeu_ps(z, tmpx); \ - } +template <> +bool VMulKernelImpl::useMKL(int d) { + return jit::MayIUse(jit::avx512f) && d > 512; +} -// avx > for > mkl -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx); -#endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2); -#endif -#ifdef __AVX512F__ -INTRI8_FLOAT(jit::avx512f); -#endif -// TODO(TJ): eq16 test and complete avx512 -#undef INTRI8_FLOAT -#undef MKL_FLOAT -#undef MKL_DOUBLE +template <> +bool VMulKernelImpl::useMKL(int d) { + return true; +} + +REGISTER_JITKERNEL(vmul, VMulKernel); /* VADD JitKernel */ template @@ -465,13 +479,12 @@ INTRI_COMMON_FLOAT(jit::avx512f, kGT16); #undef INTRI16_FLOAT #undef INTRI_COMMON_FLOAT -REGISTER_JITKERNEL(vmul, VMulKernel); -REGISTER_JITKERNEL(vadd, VAddKernel); -REGISTER_JITKERNEL(vscal, VScalKernel); -REGISTER_JITKERNEL(vaddb, VAddBiasKernel); -REGISTER_JITKERNEL(vrelu, VReluKernel); -REGISTER_JITKERNEL(vaddrelu, VAddReluKernel); -REGISTER_JITKERNEL(videntity, VIdentityKernel); +REGISTER_JITKERNEL_DEPRECATED(vadd, VAddKernel); +REGISTER_JITKERNEL_DEPRECATED(vscal, VScalKernel); +REGISTER_JITKERNEL_DEPRECATED(vaddb, VAddBiasKernel); +REGISTER_JITKERNEL_DEPRECATED(vrelu, VReluKernel); +REGISTER_JITKERNEL_DEPRECATED(vaddrelu, VAddReluKernel); +REGISTER_JITKERNEL_DEPRECATED(videntity, VIdentityKernel); } // namespace jitkernel } // namespace math diff --git a/paddle/fluid/operators/math/jit_kernel_crf_decode.cc b/paddle/fluid/operators/math/jit_kernel_crf_decode.cc index e481d1921a7dc4fd6da3fffbc3959eafa7b4b461..a4861c347e44ad86a066861d3375b556302a84bc 100644 --- a/paddle/fluid/operators/math/jit_kernel_crf_decode.cc +++ b/paddle/fluid/operators/math/jit_kernel_crf_decode.cc @@ -288,7 +288,7 @@ INTRIAVX512_FLOAT(kGT16); #undef INIT_ALPHA #undef UPDATE_ALPHA -REGISTER_JITKERNEL(crf_decode, CRFDecodeKernel); +REGISTER_JITKERNEL_DEPRECATED(crf_decode, CRFDecodeKernel); } // namespace jitkernel } // namespace math diff --git a/paddle/fluid/operators/math/jit_kernel_exp.cc b/paddle/fluid/operators/math/jit_kernel_exp.cc index c4247580f491a7ca26259528ca74dd92e35785a9..d7c177e6782e19e199542e10e1d62587ee0df4cf 100644 --- a/paddle/fluid/operators/math/jit_kernel_exp.cc +++ b/paddle/fluid/operators/math/jit_kernel_exp.cc @@ -250,7 +250,7 @@ INTRI16_FLOAT(jit::avx512f, detail::ExpAVX2); #undef MKL_FLOAT #undef MKL_DOUBLE -REGISTER_JITKERNEL(vexp, VExpKernel); +REGISTER_JITKERNEL_DEPRECATED(vexp, VExpKernel); /* VSigmoid JitKernel */ template @@ -396,7 +396,7 @@ INTRI16_FLOAT(jit::avx512f, detail::ExpAVX2); #undef INTRI_GT16_FLOAT #undef INTRI_VSIGMOID -REGISTER_JITKERNEL(vsigmoid, VSigmoidKernel); +REGISTER_JITKERNEL_DEPRECATED(vsigmoid, VSigmoidKernel); /* VTanh JitKernel */ template @@ -531,7 +531,7 @@ INTRI16_FLOAT(jit::avx512f, detail::ExpAVX2); #undef INTRI_GT16_FLOAT #undef INTRI_VTANH -REGISTER_JITKERNEL(vtanh, VTanhKernel); +REGISTER_JITKERNEL_DEPRECATED(vtanh, VTanhKernel); #undef JITKERNEL_NEW_ACT_IMPL diff --git a/paddle/fluid/operators/math/jit_kernel_macro.h b/paddle/fluid/operators/math/jit_kernel_macro.h index d8e55f2673560ff6afa34376b73275b57a8ceea1..a8169ea48ae3eee5a8cba291be4496c4c6074221 100644 --- a/paddle/fluid/operators/math/jit_kernel_macro.h +++ b/paddle/fluid/operators/math/jit_kernel_macro.h @@ -21,8 +21,71 @@ namespace operators { namespace math { namespace jitkernel { -namespace jit = platform::jit; +#define JITKERNEL_DEFINE_NAME(ker_key, ker_class) \ + template <> \ + std::string ker_class##Impl::name(int d) { \ 
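With Compute now a function pointer rather than a virtual method, call sites pass the vector length explicitly. A minimal sketch of retrieving and invoking the refactored vmul kernel; the exact template spellings are assumptions inferred from the updated unit test further below.

```cpp
// Editorial sketch: using the refactored VMulKernel, whose Compute member is
// now a function pointer bound to the JIT, MKL, or reference implementation.
#include "paddle/fluid/operators/math/jit_kernel.h"

namespace jit = paddle::operators::math::jitkernel;

void ElementwiseMul(const float* x, const float* y, float* z, int d) {
  const auto& ker =
      jit::KernelPool::Instance().Get<const jit::VMulKernel<float>>(d);
  ker->Compute(x, y, z, d);  // length is passed explicitly with the new API
}
```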
+ std::string key(#ker_key "f"); \ + if (useJIT(d)) { \ + /* only jit code need record d*/ \ + return key + "jit" + std::to_string(d); \ + } else if (useMKL(d)) { \ + return key + "mkl"; \ + } else { \ + return key + "any"; \ + } \ + } \ + template <> \ + std::string ker_class##Impl::name(int d) { \ + std::string key(#ker_key "d"); \ + /* jit code do not support double yet*/ \ + if (useMKL(d)) { \ + return key + "mkl"; \ + } else { \ + return key + "any"; \ + } \ + } + +#define JITKERNEL_DECLARE(ker_class, ker_dtype) \ + template <> \ + std::shared_ptr> \ + KernelPool::Get, int>(int d) + +#define JITKERNEL_FIND_KEY(ker_class, ker_dtype) \ + std::string key = ker_class##Impl::name(d) + +#define JITKERNEL_IMPL(ker_class, ker_dtype) \ + p = std::dynamic_pointer_cast>( \ + std::make_shared>(d)) + +#define REGISTER_JITKERNEL_WITH_DTYPE(ker_class, ker_dtype, marco_declare, \ + macro_find_key, macro_impl) \ + marco_declare(ker_class, ker_dtype) { \ + macro_find_key(ker_class, ker_dtype); \ + if (kers_.find(key) == kers_.end()) { \ + std::shared_ptr> p; \ + macro_impl(ker_class, ker_dtype); \ + kers_.insert({key, std::dynamic_pointer_cast(p)}); \ + return p; \ + } \ + return std::dynamic_pointer_cast>( \ + kers_.at(key)); \ + } +#define REGISTER_JITKERNEL_ARGS(ker_key, ker_class, marco_define_name, \ + marco_declare, macro_find_key, macro_impl) \ + marco_define_name(ker_key, ker_class); \ + REGISTER_JITKERNEL_WITH_DTYPE(ker_class, float, JITKERNEL_DECLARE, \ + JITKERNEL_FIND_KEY, JITKERNEL_IMPL); \ + REGISTER_JITKERNEL_WITH_DTYPE(ker_class, double, JITKERNEL_DECLARE, \ + JITKERNEL_FIND_KEY, JITKERNEL_IMPL) + +#define REGISTER_JITKERNEL(ker_key, ker_class) \ + REGISTER_JITKERNEL_ARGS(ker_key, ker_class, JITKERNEL_DEFINE_NAME, \ + JITKERNEL_DECLARE, JITKERNEL_FIND_KEY, \ + JITKERNEL_IMPL) + +namespace jit = platform::jit; +// TODO(TJ): below defines are deprecated, would be remove recently #define SEARCH_BLOCK(macro_, ker, dtype, isa) \ if (d < AVX_FLOAT_BLOCK) { \ macro_(ker, dtype, isa, kLT8); \ @@ -47,44 +110,42 @@ namespace jit = platform::jit; SEARCH_BLOCK(macro_, ker, dtype, jit::isa_any); \ } -#define JITKERNEL_DECLARE(ker_class, ker_dtype) \ - template <> \ - std::shared_ptr> \ - KernelPool::Get, int>(int d) - #define JITKERNEL_KEY(ker_key, dtype_key) \ #ker_key #dtype_key + std::to_string(d) -#define JITKERNEL_NEW_IMPL(ker, dtype, isa, k) \ - p = std::dynamic_pointer_cast>( \ +#define JITKERNEL_NEW_IMPL_DEPRECATED(ker, dtype, isa, k) \ + p = std::dynamic_pointer_cast>( \ std::make_shared>(d)) -#define JITKERNEL_WITH_DTYPE(ker_key, ker_class, ker_dtype, dtype_key, \ - marco_declare, macro_key, macro_impl) \ - marco_declare(ker_class, ker_dtype) { \ - std::string key = macro_key(ker_key, dtype_key); \ - if (kers_.find(key) == kers_.end()) { \ - std::shared_ptr> p; \ - SEARCH_ISA_BLOCK(macro_impl, ker_class, ker_dtype); \ - kers_.insert({key, std::dynamic_pointer_cast(p)}); \ - return p; \ - } \ - return std::dynamic_pointer_cast>( \ - kers_.at(key)); \ +#define JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, ker_dtype, \ + dtype_key, marco_declare, macro_key, \ + macro_impl) \ + marco_declare(ker_class, ker_dtype) { \ + std::string key = macro_key(ker_key, dtype_key); \ + if (kers_.find(key) == kers_.end()) { \ + std::shared_ptr> p; \ + SEARCH_ISA_BLOCK(macro_impl, ker_class, ker_dtype); \ + kers_.insert({key, std::dynamic_pointer_cast(p)}); \ + return p; \ + } \ + return std::dynamic_pointer_cast>( \ + kers_.at(key)); \ } -#define REGISTER_JITKERNEL(ker_key, ker_class) \ - 
JITKERNEL_WITH_DTYPE(ker_key, ker_class, float, f, JITKERNEL_DECLARE, \ - JITKERNEL_KEY, JITKERNEL_NEW_IMPL); \ - JITKERNEL_WITH_DTYPE(ker_key, ker_class, double, d, JITKERNEL_DECLARE, \ - JITKERNEL_KEY, JITKERNEL_NEW_IMPL) - -#define REGISTER_JITKERNEL_ARGS(ker_key, ker_class, marco_declare, macro_key, \ - macro_impl) \ - JITKERNEL_WITH_DTYPE(ker_key, ker_class, float, f, marco_declare, macro_key, \ - macro_impl); \ - JITKERNEL_WITH_DTYPE(ker_key, ker_class, double, d, marco_declare, \ - macro_key, macro_impl) +#define REGISTER_JITKERNEL_DEPRECATED(ker_key, ker_class) \ + JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, float, f, \ + JITKERNEL_DECLARE, JITKERNEL_KEY, \ + JITKERNEL_NEW_IMPL_DEPRECATED); \ + JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, double, d, \ + JITKERNEL_DECLARE, JITKERNEL_KEY, \ + JITKERNEL_NEW_IMPL_DEPRECATED) + +#define REGISTER_JITKERNEL_ARGS_DEPRECATED(ker_key, ker_class, marco_declare, \ + macro_key, macro_impl) \ + JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, float, f, marco_declare, \ + macro_key, macro_impl); \ + JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, double, d, \ + marco_declare, macro_key, macro_impl) #define FOR_EACH_ISA(macro_, block) \ macro_(jit::avx512f, block); \ diff --git a/paddle/fluid/operators/math/jit_kernel_rnn.cc b/paddle/fluid/operators/math/jit_kernel_rnn.cc index fab293f7d03eb923995fa4cd99af955a34faa6a4..d0932a37bb85bbc41f662a106c8ef5693a72efeb 100644 --- a/paddle/fluid/operators/math/jit_kernel_rnn.cc +++ b/paddle/fluid/operators/math/jit_kernel_rnn.cc @@ -179,23 +179,23 @@ class LSTMKernelImpl : public LSTMKernel { /* C_t = C_t-1 * fgated + cand_gated * igated */ act_cand_d_->Compute(gates, gates); - vmul_d_->Compute(gates, gates + d_, gates + d_); - vmul_d_->Compute(ct_1, gates + d2_, gates + d2_); + vmul_d_->Compute(gates, gates + d_, gates + d_, d_); + vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_); vadd_d_->Compute(gates + d_, gates + d2_, ct); /* H_t = act_cell(C_t) * ogated */ act_cell_d_->Compute(ct, gates + d2_); - vmul_d_->Compute(gates + d2_, gates + d3_, ht); + vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } void ComputeC1H1(T* gates, T* ct, T* ht, const T* wp_data) const override { /* C_t = igated * cgated*/ act_gate_d_->Compute(gates + d_, gates + d_); act_cand_d_->Compute(gates, gates); - vmul_d_->Compute(gates, gates + d_, ct); + vmul_d_->Compute(gates, gates + d_, ct, d_); /* H_t = act_cell(C_t) * ogated */ act_gate_d_->Compute(gates + d3_, gates + d3_); act_cell_d_->Compute(ct, gates + d2_); - vmul_d_->Compute(gates + d2_, gates + d3_, ht); + vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } private: @@ -289,36 +289,36 @@ class PeepholeKernelImpl : public LSTMKernel { void ComputeCtHt(T* gates, const T* ct_1, T* ct, T* ht, const T* wp_data, T* checked) const override { /* get fgated and igated*/ - vmul_d_->Compute(wp_data, ct_1, checked); - vmul_d_->Compute(wp_data + d_, ct_1, checked + d_); + vmul_d_->Compute(wp_data, ct_1, checked, d_); + vmul_d_->Compute(wp_data + d_, ct_1, checked + d_, d_); vadd_d2_->Compute(checked, gates + d_, gates + d_); act_gate_d2_->Compute(gates + d_, gates + d_); /* C_t = C_t-1 * fgated + cand_gated * igated*/ act_cand_d_->Compute(gates, gates); - vmul_d_->Compute(gates, gates + d_, gates + d_); - vmul_d_->Compute(ct_1, gates + d2_, gates + d2_); + vmul_d_->Compute(gates, gates + d_, gates + d_, d_); + vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_); vadd_d_->Compute(gates + d_, gates + d2_, ct); /* get ogated*/ - vmul_d_->Compute(wp_data + 
d2_, ct, gates + d_); + vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_); vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_); act_gate_d_->Compute(gates + d3_, gates + d3_); /* H_t = act_cell(C_t) * ogated */ act_cell_d_->Compute(ct, gates + d2_); - vmul_d_->Compute(gates + d2_, gates + d3_, ht); + vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } void ComputeC1H1(T* gates, T* ct, T* ht, const T* wp_data) const override { /* C_t = igated * cgated*/ act_gate_d_->Compute(gates + d_, gates + d_); act_cand_d_->Compute(gates, gates); - vmul_d_->Compute(gates, gates + d_, ct); + vmul_d_->Compute(gates, gates + d_, ct, d_); /* get outgated, put W_oc * C_t on igated */ - vmul_d_->Compute(wp_data + d2_, ct, gates + d_); + vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_); vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_); /* H_t = act_cell(C_t) * ogated */ act_gate_d_->Compute(gates + d3_, gates + d3_); act_cell_d_->Compute(ct, gates + d2_); - vmul_d_->Compute(gates + d2_, gates + d3_, ht); + vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } private: @@ -352,8 +352,8 @@ class PeepholeKernelImpl : public LSTMKernel { act_cell, d)); \ } -REGISTER_JITKERNEL_ARGS(lstm, LSTMKernel, JITKERNEL_DECLARE_LSTM, - JITKERNEL_KEY_LSTM, JITKERNEL_NEW_LSTM_IMPL); +REGISTER_JITKERNEL_ARGS_DEPRECATED(lstm, LSTMKernel, JITKERNEL_DECLARE_LSTM, + JITKERNEL_KEY_LSTM, JITKERNEL_NEW_LSTM_IMPL); #undef INTRI8_FLOAT #undef JITKERNEL_DECLARE_LSTM @@ -378,13 +378,13 @@ class GRUKernelImpl : public GRUKernel { void ComputeH1(T* gates, T* ht) const override { act_gate_d_->Compute(gates, gates); act_state_d_->Compute(gates + d2_, gates + d2_); - vmul_d_->Compute(gates, gates + d2_, ht); + vmul_d_->Compute(gates, gates + d2_, ht, d_); } void ComputeHtPart1(T* gates, const T* ht_1, T* ht) const override { // W: {W_update, W_reset; W_state} act_gate_d2_->Compute(gates, gates); - vmul_d_->Compute(ht_1, gates + d_, ht); + vmul_d_->Compute(ht_1, gates + d_, ht, d_); } void ComputeHtPart2(T* gates, const T* ht_1, T* ht) const override { @@ -472,8 +472,8 @@ INTRI8_FLOAT(jit::avx512f); p = std::dynamic_pointer_cast>( \ std::make_shared>(act_gate, act_state, d)); -REGISTER_JITKERNEL_ARGS(gru, GRUKernel, JITKERNEL_DECLARE_GRU, - JITKERNEL_KEY_GRU, JITKERNEL_NEW_GRU_IMPL); +REGISTER_JITKERNEL_ARGS_DEPRECATED(gru, GRUKernel, JITKERNEL_DECLARE_GRU, + JITKERNEL_KEY_GRU, JITKERNEL_NEW_GRU_IMPL); #undef INTRI8_FLOAT #undef JITKERNEL_NEW_GRU_IMPL diff --git a/paddle/fluid/operators/math/jit_kernel_test.cc b/paddle/fluid/operators/math/jit_kernel_test.cc index c9e6ab740da4e39de3dc41f9df352b88e696c38d..cf0d6c60d19933690de62e04926f14e6a3c12e19 100644 --- a/paddle/fluid/operators/math/jit_kernel_test.cc +++ b/paddle/fluid/operators/math/jit_kernel_test.cc @@ -369,12 +369,12 @@ void lstm_ctht_better( int d2 = d * 2; vsigmoid_3d->Compute(gates + d, gates + d); vtanh_d->Compute(gates, gates); - vmul_d->Compute(gates, gates + d, gates + d); - vmul_d->Compute(ct_1, gates + d2, gates + d2); + vmul_d->Compute(gates, gates + d, gates + d, d); + vmul_d->Compute(ct_1, gates + d2, gates + d2, d); vadd_d->Compute(gates + d, gates + d2, ct); /* H_t = act_cell(C_t) * ogated */ vtanh_d->Compute(ct, gates + d2); - vmul_d->Compute(gates + d2, gates + d * 3, ht); + vmul_d->Compute(gates + d2, gates + d * 3, ht, d); } TEST(JitKernel, lstm) { @@ -578,7 +578,7 @@ void vmul_mkl(const int n, const float* x, const float* y, float* z) { TEST(JitKernel, vmul) { namespace jit = paddle::operators::math::jitkernel; - for (int d : {7, 8, 15, 16, 30, 256, 
512}) { + for (int d : {7, 8, 15, 16, 30, 256, 512, 1000, 1024}) { std::vector x(d), y(d); std::vector zref(d), ztgt(d); RandomVec(d, x.data()); @@ -616,7 +616,7 @@ TEST(JitKernel, vmul) { auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->Compute(x_data, y_data, ztgt_data); + ker->Compute(x_data, y_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); @@ -800,8 +800,8 @@ TEST(JitKernel, pool) { EXPECT_TRUE(std::dynamic_pointer_cast(pvmul_f) != std::dynamic_pointer_cast(pvmul_d)); - const auto& pvmul_from_key = jit::KernelPool::Instance().Get("vmulf4"); + const auto& pvmul_from_key = jit::KernelPool::Instance().Get("vmulfany"); EXPECT_EQ(pvmul_f, pvmul_from_key); - const auto& pvmul_from_key2 = jit::KernelPool::Instance().Get("vmulf5"); + const auto& pvmul_from_key2 = jit::KernelPool::Instance().Get("vmulfjit"); EXPECT_TRUE(pvmul_from_key2 == nullptr); } diff --git a/paddle/fluid/operators/math/pooling.cc b/paddle/fluid/operators/math/pooling.cc index b871851798e48e6b598cb4ab8e2e42db478a3820..8df43bb616179e2487534e0acabb71b09b87e1af 100644 --- a/paddle/fluid/operators/math/pooling.cc +++ b/paddle/fluid/operators/math/pooling.cc @@ -31,7 +31,7 @@ class Pool2dFunctor { const framework::Tensor& input, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* output) { + bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -68,7 +68,8 @@ class Pool2dFunctor { pool_process.compute(input_data[h * input_width + w], &ele); } } - int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = exclusive ? (hend - hstart) * (wend - wstart) + : ksize_height * ksize_width; pool_process.finalize(static_cast(pool_size), &ele); output_data[ph * output_width + pw] = ele; } @@ -93,7 +94,7 @@ class Pool2dGradFunctor { const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_grad_process, - framework::Tensor* input_grad) { + bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -124,7 +125,8 @@ class Pool2dGradFunctor { int wstart = pw * stride_width - padding_width; int wend = std::min(wstart + ksize_width, input_width); wstart = std::max(wstart, 0); - int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = exclusive ? (hend - hstart) * (wend - wstart) + : ksize_height * ksize_width; float scale = 1.0 / pool_size; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { @@ -249,7 +251,7 @@ class Pool3dFunctor { const framework::Tensor& input, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* output) { + bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -300,7 +302,9 @@ class Pool3dFunctor { } } int pool_size = - (dend - dstart) * (hend - hstart) * (wend - wstart); + exclusive + ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) + : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast(pool_size), &ele); output_data[output_idx] = ele; } @@ -326,7 +330,7 @@ class Pool3dGradFunctor { const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_grad_process, - framework::Tensor* input_grad) { + bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -369,7 +373,9 @@ class Pool3dGradFunctor { wstart = std::max(wstart, 0); int pool_size = - (dend - dstart) * (hend - hstart) * (wend - wstart); + exclusive + ? (dend - dstart) * (hend - hstart) * (wend - wstart) + : ksize_depth * ksize_height * ksize_width; float scale = 1.0 / pool_size; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { diff --git a/paddle/fluid/operators/math/pooling.cu b/paddle/fluid/operators/math/pooling.cu index b1c76350d1724629bae175abf47e6671a1532242..a689eb42242e551caa3470f34f7e8d7e80b6dfbe 100644 --- a/paddle/fluid/operators/math/pooling.cu +++ b/paddle/fluid/operators/math/pooling.cu @@ -29,7 +29,7 @@ __global__ void KernelPool2D(const int nthreads, const T* input_data, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, - T* output_data) { + bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; @@ -52,7 +52,8 @@ __global__ void KernelPool2D(const int nthreads, const T* input_data, pool_process.compute(input_data[h * input_width + w], &ele); } } - int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = exclusive ? (hend - hstart) * (wend - wstart) + : ksize_height * ksize_width; pool_process.finalize(static_cast(pool_size), &ele); output_data[index] = ele; } @@ -65,7 +66,7 @@ __global__ void KernelPool2DGrad( const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, - PoolProcess pool_process, T* input_grad) { + PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; @@ -95,7 +96,8 @@ __global__ void KernelPool2DGrad( int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); - int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = exclusive ? 
(hend - hstart) * (wend - wstart) + : ksize_height * ksize_width; int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], @@ -163,7 +165,7 @@ class Pool2dFunctor { const framework::Tensor& input, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* output) { + bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -189,7 +191,8 @@ class Pool2dFunctor { KernelPool2D<<>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, - stride_width, padding_height, padding_width, pool_process, output_data); + stride_width, padding_height, padding_width, pool_process, exclusive, + output_data); } }; @@ -208,7 +211,7 @@ class Pool2dGradFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* input_grad) { + bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -236,7 +239,7 @@ class Pool2dGradFunctor { nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, - pool_process, input_grad_data); + pool_process, exclusive, input_grad_data); } }; @@ -313,16 +316,14 @@ template class Pool2dGradFunctor; template -__global__ void KernelPool3D(const int nthreads, const T* input_data, - const int channels, const int input_depth, - const int input_height, const int input_width, - const int output_depth, const int output_height, - const int output_width, const int ksize_depth, - const int ksize_height, const int ksize_width, - const int stride_depth, const int stride_height, - const int stride_width, const int padding_depth, - const int padding_height, const int padding_width, - PoolProcess pool_process, T* output_data) { +__global__ void KernelPool3D( + const int nthreads, const T* input_data, const int channels, + const int input_depth, const int input_height, const int input_width, + const int output_depth, const int output_height, const int output_width, + const int ksize_depth, const int ksize_height, const int ksize_width, + const int stride_depth, const int stride_height, const int stride_width, + const int padding_depth, const int padding_height, const int padding_width, + PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; @@ -351,7 +352,9 @@ __global__ void KernelPool3D(const int nthreads, const T* input_data, } } } - int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + int pool_size = exclusive + ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) + : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast(pool_size), &ele); output_data[index] = ele; } @@ -366,7 +369,7 @@ __global__ void KernelPool3DGrad( const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, - T* input_grad) { + bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; @@ -409,7 +412,9 @@ __global__ void KernelPool3DGrad( dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); - int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + int pool_size = + exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) + : ksize_depth * ksize_height * ksize_width; int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], @@ -484,7 +489,7 @@ class Pool3dFunctor { const framework::Tensor& input, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* output) { + bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -517,7 +522,7 @@ class Pool3dFunctor { nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, - padding_depth, padding_height, padding_width, pool_process, + padding_depth, padding_height, padding_width, pool_process, exclusive, output_data); } }; @@ -537,7 +542,7 @@ class Pool3dGradFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* input_grad) { + bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -573,7 +578,7 @@ class Pool3dGradFunctor { input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, - padding_width, pool_process, input_grad_data); + padding_width, pool_process, exclusive, input_grad_data); } }; diff --git a/paddle/fluid/operators/math/pooling.h b/paddle/fluid/operators/math/pooling.h index 120f5919803806e0d3b7dc8eaf530ae89819b84d..0f64e321bf01eea69767af020ed8c1a75e31acb5 100644 --- a/paddle/fluid/operators/math/pooling.h +++ b/paddle/fluid/operators/math/pooling.h @@ -89,7 +89,7 @@ class Pool2dFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_compute, - framework::Tensor* output); + bool exclusive, framework::Tensor* output); }; template @@ -101,7 +101,7 @@ class Pool2dGradFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_compute, - framework::Tensor* input_grad); + bool exclusive, framework::Tensor* input_grad); }; template @@ -123,7 +123,7 @@ class Pool3dFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, 
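A small worked example of the new exclusive attribute threaded through the pooling functors above (editorial, with made-up numbers): for a 3x3 average-pooling window clipped to 2x2 at a padded corner, the two modes divide the same window sum by different counts.

```cpp
// Editorial illustration of the divisor selected by `exclusive` for an
// average-pooling window clipped at the input border (numbers are made up).
#include <cstdio>

int main() {
  // A 3x3 window centred on a corner pixel with padding 1 covers only
  // 2x2 in-bounds elements; assume they sum to 8.
  int hstart = 0, hend = 2, wstart = 0, wend = 2;
  int ksize_height = 3, ksize_width = 3;
  float window_sum = 8.0f;
  const bool modes[] = {true, false};
  for (bool exclusive : modes) {
    int pool_size = exclusive ? (hend - hstart) * (wend - wstart)  // 4
                              : ksize_height * ksize_width;        // 9
    std::printf("exclusive=%d -> average=%.3f\n", static_cast<int>(exclusive),
                window_sum / pool_size);  // 2.000 vs 0.889
  }
  return 0;
}
```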
PoolProcess pool_compute, - framework::Tensor* output); + bool exclusive, framework::Tensor* output); }; template @@ -135,7 +135,7 @@ class Pool3dGradFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_compute, - framework::Tensor* input_grad); + bool exclusive, framework::Tensor* input_grad); }; template diff --git a/paddle/fluid/operators/math/sequence_pooling.cc b/paddle/fluid/operators/math/sequence_pooling.cc index 7be8539a7b0f1890898fd386a3056601fda8a7c3..6d491dbf1ed162ef07fda4c07e95cc57108486fd 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cc +++ b/paddle/fluid/operators/math/sequence_pooling.cc @@ -31,7 +31,7 @@ template using EigenMatrix = framework::EigenMatrix; -template +template class MaxSeqPoolFunctor { public: void operator()(const platform::CPUDeviceContext& context, @@ -70,7 +70,41 @@ class MaxSeqPoolFunctor { } } }; +// Instantiation of Max Sequence Pooling for the test phase, e.g. no need to fill +// index buffer +template +class MaxSeqPoolFunctor { + public: + void operator()(const platform::CPUDeviceContext& context, + const framework::LoDTensor& input, framework::Tensor* output, + framework::Tensor* index) { + auto in_dims = input.dims(); + auto out_dims = output->dims(); + PADDLE_ENFORCE_GT(in_dims.size(), 1); + PADDLE_ENFORCE_GT(out_dims.size(), 1); + for (int64_t i = 1; i < in_dims.size(); ++i) { + PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]); + } + + auto starts = input.lod()[0]; + const T* in_data = input.data(); + T* out_data = output->data(); + int64_t num_seq = out_dims[0]; + int64_t dim = output->numel() / num_seq; + for (int64_t i = 0; i < num_seq; ++i) { + std::memcpy(&out_data[i * dim], &in_data[starts[i] * dim], + dim * sizeof(T)); + for (size_t j = starts[i] + 1; j < starts[i + 1]; ++j) { + for (int64_t k = 0; k < dim; ++k) { + if (in_data[j * dim + k] > out_data[i * dim + k]) { + out_data[i * dim + k] = in_data[j * dim + k]; + } + } + } + } + } +}; template class MaxSeqPoolGradFunctor { public: @@ -188,11 +222,16 @@ class SequencePoolFunctor { /* max pool has index output */ void operator()(const platform::CPUDeviceContext& context, const std::string pooltype, const framework::LoDTensor& input, - framework::Tensor* output, + framework::Tensor* output, bool is_test, framework::Tensor* index = nullptr) { if (pooltype == "MAX") { - math::MaxSeqPoolFunctor max_pool; - max_pool(context, input, output, index); + if (is_test) { + math::MaxSeqPoolFunctor max_pool; + max_pool(context, input, output, index); + } else { + math::MaxSeqPoolFunctor max_pool; + max_pool(context, input, output, index); + } return; } if (pooltype == "LAST") { @@ -200,6 +239,7 @@ class SequencePoolFunctor { last_pool(context, input, output); return; } + if (pooltype == "FIRST") { math::FirstSeqPoolFunctor first_pool; first_pool(context, input, output); diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu index a92aef805a0434f2ebcbc62d4e5eaef0cfb21bfa..0015fafbc892912424dfa6dbd1778438d384ca19 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cu +++ b/paddle/fluid/operators/math/sequence_pooling.cu @@ -133,7 +133,7 @@ class SequencePoolFunctor { public: void operator()(const platform::CUDADeviceContext& context, const std::string pooltype, const framework::LoDTensor& input, - framework::Tensor* output, + framework::Tensor* output, bool is_test, framework::Tensor* index = nullptr) { auto& lod = input.lod()[0]; const size_t item_dim = output->numel() /
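For the `is_test` specialization of `MaxSeqPoolFunctor` above: it produces the same per-sequence elementwise max but never materializes the index tensor that the training path keeps for the backward pass. A NumPy sketch of the semantics (names are illustrative, not the Paddle API):

```python
import numpy as np

def max_seq_pool_reference(x, lod, with_index=False):
    """Max over the rows of each sequence; `lod` holds start offsets,
    e.g. [0, 2, 5] means rows 0..1 form sequence 0 and rows 2..4 sequence 1."""
    num_seq = len(lod) - 1
    out = np.empty((num_seq, x.shape[1]), dtype=x.dtype)
    index = np.empty((num_seq, x.shape[1]), dtype=np.int64) if with_index else None
    for i in range(num_seq):
        seq = x[lod[i]:lod[i + 1]]
        out[i] = seq.max(axis=0)
        if with_index:
            # Training keeps the absolute row of each winner for the gradient.
            index[i] = lod[i] + seq.argmax(axis=0)
    return out, index

x = np.array([[1., 9.], [3., 2.],
              [4., 0.], [2., 8.], [5., 1.]])
out, _ = max_seq_pool_reference(x, [0, 2, 5])  # inference path: no index buffer
print(out)  # [[3. 9.] [5. 8.]]
```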
output->dims()[0]; diff --git a/paddle/fluid/operators/math/sequence_pooling.h b/paddle/fluid/operators/math/sequence_pooling.h index 8dcbee65d0b63a137e5f422ec8667cc950641b4a..a1046ea2160d0ae9c2251612c97d3f2640b0aad1 100644 --- a/paddle/fluid/operators/math/sequence_pooling.h +++ b/paddle/fluid/operators/math/sequence_pooling.h @@ -28,7 +28,7 @@ class SequencePoolFunctor { /* max pool has index output */ void operator()(const DeviceContext& context, const std::string pooltype, const framework::LoDTensor& input, framework::Tensor* output, - framework::Tensor* index = nullptr); + bool is_test = false, framework::Tensor* index = nullptr); }; template diff --git a/paddle/fluid/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc index 31f083565fddee66aea1485ed71f41b6199f4502..1f090dc3d5439117d3b1a32bbdf5e66d33d4d133 100644 --- a/paddle/fluid/operators/pool_cudnn_op.cu.cc +++ b/paddle/fluid/operators/pool_cudnn_op.cu.cc @@ -41,6 +41,7 @@ class PoolCUDNNOpKernel : public framework::OpKernel { T *output_data = output->mutable_data(ctx.GetPlace()); std::string pooling_type = ctx.Attr("pooling_type"); + bool exclusive = ctx.Attr("exclusive"); std::vector ksize = ctx.Attr>("ksize"); std::vector strides = ctx.Attr>("strides"); std::vector paddings = ctx.Attr>("paddings"); @@ -72,7 +73,8 @@ class PoolCUDNNOpKernel : public framework::OpKernel { if (pooling_type == "max") { pooling_mode = PoolingMode::kMaximum; } else { - pooling_mode = PoolingMode::kAverage; + pooling_mode = exclusive ? PoolingMode::kAverageExclusive + : PoolingMode::kAverageInclusive; } cudnnPoolingDescriptor_t cudnn_pool_desc = @@ -101,6 +103,7 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel { Tensor *input_grad = ctx.Output(framework::GradVarName("X")); std::string pooling_type = ctx.Attr("pooling_type"); + bool exclusive = ctx.Attr("exclusive"); std::vector ksize = ctx.Attr>("ksize"); std::vector strides = ctx.Attr>("strides"); std::vector paddings = ctx.Attr>("paddings"); @@ -141,7 +144,8 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel { pooling_mode = PoolingMode::kMaximum; } } else { - pooling_mode = PoolingMode::kAverage; + pooling_mode = exclusive ? PoolingMode::kAverageExclusive + : PoolingMode::kAverageInclusive; } cudnnPoolingDescriptor_t cudnn_pool_desc = diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index 24a5346b031008531fcefff0e6f1c31da33d1c3b..484cb65746612343fafc49fe61b607f2e919cf4f 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -180,6 +180,12 @@ void Pool2dOpMaker::Make() { "operator." "If global_pooling = true, paddings and ksize will be ignored.") .SetDefault({0, 0}); + AddAttr( + "exclusive", + "(bool, default True) When true, will exclude the zero-padding in the " + "averaging calculation, otherwise, include the zero-padding. Note, it " + "is only used when pooling_type is avg.
The defalut is True.") + .SetDefault(true); AddAttr( "use_cudnn", "(bool, default false) Only used in cudnn kernel, need install cudnn") @@ -236,6 +242,23 @@ Example: W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1 $$ + For exclusive = true: + $$ + hstart = i * strides[0] - paddings[0] + hend = hstart + ksize[0] + wstart = j * strides[1] - paddings[1] + wend = wstart + ksize[1] + Output(i ,j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]} + $$ + For exclusive = false: + $$ + hstart = max(0, i * strides[0] - paddings[0]) + hend = min(H, hstart + ksize[0]) + wstart = max(0, j * strides[1] - paddings[1]) + wend = min(W, wstart + ksize[1]) + Output(i ,j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} + $$ + )DOC"); } @@ -283,6 +306,12 @@ void Pool3dOpMaker::Make() { "If global_pooling = true, ksize and paddings will be ignored.") .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) + AddAttr( + "exclusive", + "(bool, default True) When true, will exclude the zero-padding in the " + "averaging calculating, otherwise, include the zero-padding. Note, it " + "is only used when pooling_type is avg. The defalut is True.") + .SetDefault(true); AddAttr( "use_cudnn", diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h index a63963ca926bb94ff99e5cfe6dbcb2b15075bcb8..c0594b7e3cc5602a44bb01951a22c2135ba5c7ce 100644 --- a/paddle/fluid/operators/pool_op.h +++ b/paddle/fluid/operators/pool_op.h @@ -69,6 +69,7 @@ class PoolKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); + bool exclusive = context.Attr("exclusive"); if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { paddings[i] = 0; @@ -84,7 +85,7 @@ class PoolKernel : public framework::OpKernel { pool2d_forward; paddle::operators::math::MaxPool pool_process; pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, - out); + true, out); } else if (pooling_type == "avg") { paddle::operators::math::Pool2dFunctor< @@ -92,7 +93,7 @@ class PoolKernel : public framework::OpKernel { pool2d_forward; paddle::operators::math::AvgPool pool_process; pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, - out); + exclusive, out); } } break; case 3: { @@ -102,14 +103,14 @@ class PoolKernel : public framework::OpKernel { pool3d_forward; paddle::operators::math::MaxPool pool_process; pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, - out); + true, out); } else if (pooling_type == "avg") { paddle::operators::math::Pool3dFunctor< DeviceContext, paddle::operators::math::AvgPool, T> pool3d_forward; paddle::operators::math::AvgPool pool_process; pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, - out); + exclusive, out); } } break; default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); } @@ -131,6 +132,7 @@ class PoolGradKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); + bool exclusive = context.Attr("exclusive"); if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { @@ -157,7 +159,7 @@ class PoolGradKernel : public framework::OpKernel { pool2d_backward; paddle::operators::math::AvgPoolGrad 
pool_process; pool2d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides, - paddings, pool_process, in_x_grad); + paddings, pool_process, exclusive, in_x_grad); } } break; case 3: { @@ -172,7 +174,7 @@ class PoolGradKernel : public framework::OpKernel { pool3d_backward; paddle::operators::math::AvgPoolGrad pool_process; pool3d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides, - paddings, pool_process, in_x_grad); + paddings, pool_process, exclusive, in_x_grad); } } break; default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); } diff --git a/paddle/fluid/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_pool_op.cc index 15d3f064eb7b025dc9a85b2aabad24186061cbd4..217bb1610fd3f02f0f72d3b7750ebcdfad243f48 100644 --- a/paddle/fluid/operators/sequence_pool_op.cc +++ b/paddle/fluid/operators/sequence_pool_op.cc @@ -47,6 +47,7 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker { "(Tensor) This tensor is used for the sequence max-pooling " "to record the max indexes.") .AsIntermediate(); + AddAttr("is_test", "").SetDefault(false); AddAttr( "pooltype", "(string, default 'AVERAGE') the pooling pooltype of SequencePoolOp.") diff --git a/paddle/fluid/operators/sequence_pool_op.h b/paddle/fluid/operators/sequence_pool_op.h index 2aa20792f24305a106c500a3d7a6e3d363bc31d8..f2e4a55dee49664b2fc09813f6dba5f68aaf11d5 100644 --- a/paddle/fluid/operators/sequence_pool_op.h +++ b/paddle/fluid/operators/sequence_pool_op.h @@ -32,10 +32,6 @@ class SequencePoolKernel : public framework::OpKernel { auto* in = context.Input("X"); auto* out = context.Output("Out"); std::string pooltype = context.Attr("pooltype"); - Tensor* index = nullptr; - if (pooltype == "MAX") { - index = context.Output("MaxIndex"); - } auto dims = in->dims(); auto lod = in->lod(); @@ -48,13 +44,22 @@ class SequencePoolKernel : public framework::OpKernel { dims[0] = lod[0].size() - 1; out->Resize({dims}); out->mutable_data(context.GetPlace()); - if (pooltype == "MAX") { + Tensor* index = nullptr; + + const bool is_test = context.Attr("is_test"); + + // Do not create index buffer for inference (is_test) mode + // TODO(jczaja): Skip index buffer creation for other devices eg. GPU + if (pooltype == "MAX" && + (is_test == false || + platform::is_cpu_place(context.GetPlace()) == false)) { + index = context.Output("MaxIndex"); index->Resize({dims}); index->mutable_data(context.GetPlace()); } math::SequencePoolFunctor pool; pool(context.template device_context(), pooltype, *in, out, - index); + is_test, index); } }; diff --git a/paddle/fluid/operators/sign_op.cc b/paddle/fluid/operators/sign_op.cc index f3985dcc027f974e0213a73ea9a21e268d77615f..6837856a6da804e27af2cd6c83052c04f17140d8 100644 --- a/paddle/fluid/operators/sign_op.cc +++ b/paddle/fluid/operators/sign_op.cc @@ -67,4 +67,5 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(sign, ops::SignOp, ops::SignOpMaker, ops::SignGradMaker); REGISTER_OP_CPU_KERNEL( - sign, ops::SignKernel); + sign, ops::SignKernel, + ops::SignKernel); diff --git a/paddle/fluid/operators/sign_op.cu b/paddle/fluid/operators/sign_op.cu index e0d7a87e6485a74dd1cdee1a05abc42eef460990..817e0fbbd511462f161633242d28e63062676eb9 100644 --- a/paddle/fluid/operators/sign_op.cu +++ b/paddle/fluid/operators/sign_op.cu @@ -13,7 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. 
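On the Python side, the new `is_test` attribute of `sequence_pool` shown above would typically be set through the layer wrapper. A hedged usage sketch, assuming the fluid layer forwards the attribute (the exact Python signature is an assumption, not taken from this diff):

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[128], dtype='float32', lod_level=1)
# With is_test=True the CPU kernel above skips allocating the MaxIndex
# buffer, which is only needed to route gradients during training.
pooled = fluid.layers.sequence_pool(input=x, pool_type='max', is_test=True)
```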
*/ #include "paddle/fluid/operators/sign_op.h" +#include "paddle/fluid/platform/float16.h" REGISTER_OP_CUDA_KERNEL( sign, - paddle::operators::SignKernel); + paddle::operators::SignKernel, + paddle::operators::SignKernel, + paddle::operators::SignKernel); diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc index 1a9324ec862fc3dd7ce669c5fed94527cac22b8f..2900221485e6ec097796ac38936ce31f8382c86a 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc @@ -44,6 +44,12 @@ class SoftmaxWithCrossEntropyOpMaker "(bool, default: false), A flag to indicate whether to interpretate " "the given labels as soft labels.") .SetDefault(false); + AddAttr( + "numeric_stable_mode", + "(bool, default: false), A flag to indicate whether to use more " + "numerically stable algorithm. This flag is only valid when " + "soft_label is false and GPU is used.") + .SetDefault(false); AddAttr( "ignore_index", "(int, default -100), Specifies a target value that is ignored and" diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu index a07c17348ebb3f768d1c8be65c2d31e3c130bd23..6d48796191dd13a45f0c7267bfaf05489f528a9d 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu @@ -17,6 +17,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" +#include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { @@ -117,8 +118,8 @@ using BlockReduceTempStorage = typename BlockReduce::TempStorage; // Make sure that BlockDim <= feature_size // This kernel is used to calculate the max element of each row template -__global__ void RowReductionForMax(const T* logits_data, T* max_data, - int feature_size) { +static __global__ void RowReductionForMax(const T* logits_data, T* max_data, + int feature_size) { __shared__ BlockReduceTempStorage temp_storage; auto beg_idx = feature_size * blockIdx.x + threadIdx.x; @@ -141,9 +142,10 @@ __global__ void RowReductionForMax(const T* logits_data, T* max_data, } // Make sure that BlockDim <= feature_size -template -__global__ void RowReductionForDiffMaxSum(const T* logits_data, T* max_data, - T* softmax, int feature_size) { +template +static __global__ void RowReductionForDiffMaxSum(const T* logits_data, + T* max_data, T* softmax, + int feature_size) { __shared__ BlockReduceTempStorage temp_storage; auto beg_idx = feature_size * blockIdx.x + threadIdx.x; @@ -153,24 +155,34 @@ __global__ void RowReductionForDiffMaxSum(const T* logits_data, T* max_data, softmax[beg_idx] = logits_data[beg_idx] - block_max; T diff_max_sum = real_exp(softmax[beg_idx]); - beg_idx += BlockDim; - while (beg_idx < end_idx) { - softmax[beg_idx] = logits_data[beg_idx] - block_max; - diff_max_sum += real_exp(softmax[beg_idx]); - beg_idx += BlockDim; + auto idx = beg_idx + BlockDim; + while (idx < end_idx) { + softmax[idx] = logits_data[idx] - block_max; + diff_max_sum += real_exp(softmax[idx]); + idx += BlockDim; } diff_max_sum = BlockReduce(temp_storage).Reduce(diff_max_sum, cub::Sum()); if (threadIdx.x == 0) max_data[blockIdx.x] = real_log(diff_max_sum); + + if (!CalculateLogSoftmax) return; + __syncthreads(); + diff_max_sum = max_data[blockIdx.x]; + softmax[beg_idx] -= diff_max_sum; + beg_idx += 
BlockDim; + while (beg_idx < end_idx) { + softmax[beg_idx] -= diff_max_sum; + beg_idx += BlockDim; + } + if (threadIdx.x == 0) max_data[blockIdx.x] = 0; } // Make sure that BlockDim <= feature_size template -__global__ void RowReductionForSoftmaxAndCrossEntropy(const T* logits_data, - const T* labels_data, - T* loss_data, T* softmax, - int feature_size) { +static __global__ void RowReductionForSoftmaxAndCrossEntropy( + const T* logits_data, const T* labels_data, T* loss_data, T* softmax, + int feature_size) { __shared__ BlockReduceTempStorage temp_storage; auto beg_idx = feature_size * blockIdx.x + threadIdx.x; @@ -194,11 +206,134 @@ __global__ void RowReductionForSoftmaxAndCrossEntropy(const T* logits_data, } template -__global__ void SetSoftmaxToOneWhenFeatureSizeIsOne(T* out, int batch_size) { +struct HardLabelSoftmaxWithCrossEntropyFunctor { + public: + HardLabelSoftmaxWithCrossEntropyFunctor(const T* logits, + const int64_t* labels, T* loss, + T* log_softmax, int feature_size) + : logits_(logits), + labels_(labels), + loss_(loss), + log_softmax_(log_softmax), + feature_size_(feature_size) {} + + __device__ void operator()(int idx) const { + auto row_idx = idx / feature_size_; + auto col_idx = idx % feature_size_; + if (col_idx != labels_[row_idx]) { + log_softmax_[idx] = real_exp(log_softmax_[idx]); + } else { + auto softmax = log_softmax_[idx]; + log_softmax_[idx] = real_exp(softmax); + loss_[row_idx] = -softmax; + } + } + + private: + const T* logits_; + const int64_t* labels_; + T* loss_; + T* log_softmax_; + int feature_size_; +}; + +template +struct HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx { + public: + HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx(const T* logits, + const int64_t* labels, + T* loss, T* log_softmax, + int feature_size, + int ignore_idx) + : logits_(logits), + labels_(labels), + loss_(loss), + log_softmax_(log_softmax), + feature_size_(feature_size), + ignore_idx_(ignore_idx) {} + + __device__ void operator()(int idx) const { + auto row_idx = idx / feature_size_; + auto col_idx = idx % feature_size_; + if (col_idx != labels_[row_idx] || col_idx == ignore_idx_) { + log_softmax_[idx] = real_exp(log_softmax_[idx]); + } else { + auto softmax = log_softmax_[idx]; + log_softmax_[idx] = real_exp(softmax); + loss_[row_idx] = -softmax; + } + } + + private: + const T* logits_; + const int64_t* labels_; + T* loss_; + T* log_softmax_; + int feature_size_; + int ignore_idx_; +}; + +template +static __global__ void SetSoftmaxToOneWhenFeatureSizeIsOne(T* out, + int batch_size) { auto idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < batch_size) out[idx] = static_cast(1); } +template +static void HardLabelSoftmaxWithCrossEntropy( + const platform::CUDADeviceContext& ctx, const T* logits_data, + const int64_t* labels_data, T* loss_data, T* softmax_data, int batch_size, + int feature_size, int ignore_idx) { + constexpr int kMaxBlockDim = 512; + int block_dim = feature_size >= kMaxBlockDim + ? 
kMaxBlockDim + : (1 << static_cast(std::log2(feature_size))); + auto stream = ctx.stream(); + +#define CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ + case BlockDim: { \ + RowReductionForMax<<>>( \ + logits_data, loss_data, feature_size); \ + RowReductionForDiffMaxSum<<>>( \ + logits_data, loss_data, softmax_data, feature_size); \ + platform::ForRange for_range( \ + ctx, batch_size* feature_size); \ + if (ignore_idx >= 0 && ignore_idx < feature_size) { \ + for_range(HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx( \ + logits_data, labels_data, loss_data, softmax_data, feature_size, \ + ignore_idx)); \ + } else { \ + for_range(HardLabelSoftmaxWithCrossEntropyFunctor( \ + logits_data, labels_data, loss_data, softmax_data, feature_size)); \ + } \ + } break + + switch (block_dim) { + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(512); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(256); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(128); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(64); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(32); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(16); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(8); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2); + case 1: + SetSoftmaxToOneWhenFeatureSizeIsOne<<<(batch_size + kMaxBlockDim - 1) / + kMaxBlockDim, + kMaxBlockDim, 0, stream>>>( + softmax_data, batch_size); + cudaMemsetAsync(loss_data, 0, batch_size * sizeof(T), stream); + break; + default: + PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op"); + break; + } +#undef CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL +} + template static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data, const T* labels_data, @@ -237,7 +372,7 @@ static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data, kMaxBlockDim, kMaxBlockDim, 0, stream>>>( softmax_data, batch_size); - cudaMemsetAsync(loss_data, 0, batch_size, stream); + cudaMemsetAsync(loss_data, 0, batch_size * sizeof(T), stream); break; default: PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op"); @@ -272,11 +407,21 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { logits_data, labels_data, softmax_data, loss_data, batch_size, feature_size, context.cuda_device_context().stream()); } else { - math::SoftmaxCUDNNFunctor()(context.cuda_device_context(), logits, - softmax); - math::CrossEntropyFunctor()( - context.cuda_device_context(), loss, softmax, labels, false, - ignore_index); + if (!context.Attr("numeric_stable_mode")) { + math::SoftmaxCUDNNFunctor()(context.cuda_device_context(), logits, + softmax); + math::CrossEntropyFunctor()( + context.cuda_device_context(), loss, softmax, labels, false, + ignore_index); + } else { + int batch_size = logits->dims()[0]; + int feature_size = logits->dims()[1]; + auto* logits_data = logits->data(); + auto* labels_data = labels->data(); + HardLabelSoftmaxWithCrossEntropy( + context.cuda_device_context(), logits_data, labels_data, loss_data, + softmax_data, batch_size, feature_size, ignore_index); + } } } }; diff --git a/paddle/fluid/operators/spp_op.h b/paddle/fluid/operators/spp_op.h index 08cb7849d20443862b66ea6096c095b294c7242c..35d9737ee01fe1505cbe30e8ed735e6b92cb8df2 100644 --- a/paddle/fluid/operators/spp_op.h +++ b/paddle/fluid/operators/spp_op.h @@ -56,12 +56,14 @@ class SppKernel : public framework::OpKernel { 
math::Pool2dFunctor, T> pool_forward; math::MaxPool max_process; pool_forward(context.template device_context(), *in_x, - kernel_size, strides, paddings, max_process, &out_level); + kernel_size, strides, paddings, max_process, true, + &out_level); } else if (pooling_type == "avg") { math::Pool2dFunctor, T> pool_forward; math::AvgPool avg_process; pool_forward(context.template device_context(), *in_x, - kernel_size, strides, paddings, avg_process, &out_level); + kernel_size, strides, paddings, avg_process, true, + &out_level); } // flatten pooling output shape int output_flatten_w = in_x->dims()[1] * bins * bins; @@ -154,7 +156,7 @@ class SppGradKernel : public framework::OpKernel { math::AvgPoolGrad avg_process; pool_backward(context.template device_context(), *in_x, *&out_level, *&outgrad_level, kernel_size, strides, - paddings, avg_process, in_x_grad); + paddings, avg_process, true, in_x_grad); } } } diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index 6fe30630e9683f59044b216b9e9b1f7dd647b1e2..d19ac9839c90a116265b761e3b1b3f855e2d95e8 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -67,6 +67,7 @@ class SumOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { auto x_vars = ctx.MultiInputVar("X"); + auto x_vars_name = ctx.Inputs("X"); framework::LibraryType library{framework::LibraryType::kPlain}; framework::DataLayout layout{framework::DataLayout::kAnyLayout}; @@ -81,10 +82,11 @@ class SumOp : public framework::OperatorWithKernel { if (x_vars[0]->IsType()) { int dtype = -1; - for (auto& x_var : x_vars) { + for (size_t idx = 0; idx < x_vars.size(); ++idx) { + PADDLE_ENFORCE(x_vars[idx] != nullptr, + "Input var[%s] should not be nullptr", x_vars_name[idx]); // FIXME(zcd): The input x_var may be SelectedRows or LoDTensor. 
- auto tensor = framework::GetTensorFromVar( - const_cast(x_var)); + auto tensor = framework::GetTensorFromVar(*x_vars[idx]); if (tensor->numel() == 0) { continue; } diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index bb8b14bb9fa41942c3aa653ca224c0842fbf9a00..07bb02be1962f758e50cab1f27de43e89f3953c3 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -76,8 +76,9 @@ enum class DataLayout { // Not use enum class PoolingMode { kMaximum, - kAverage, kMaximumDeterministic, + kAverageExclusive, + kAverageInclusive, }; #if CUDNN_VERSION < 6000 @@ -91,8 +92,10 @@ inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) { switch (mode) { case PoolingMode::kMaximumDeterministic: return CUDNN_POOLING_MAX; - case PoolingMode::kAverage: + case PoolingMode::kAverageExclusive: return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; + case PoolingMode::kAverageInclusive: + return CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; case PoolingMode::kMaximum: return CUDNN_POOLING_MAX; default: @@ -105,8 +108,10 @@ inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) { switch (mode) { case PoolingMode::kMaximumDeterministic: return CUDNN_POOLING_MAX_DETERMINISTIC; - case PoolingMode::kAverage: + case PoolingMode::kAverageExclusive: return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; + case PoolingMode::kAverageInclusive: + return CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; case PoolingMode::kMaximum: return CUDNN_POOLING_MAX; default: @@ -341,6 +346,28 @@ class ScopedPoolingDescriptor { DISABLE_COPY_AND_ASSIGN(ScopedPoolingDescriptor); }; +class ScopedSpatialTransformerDescriptor { + public: + ScopedSpatialTransformerDescriptor() { + PADDLE_ENFORCE(dynload::cudnnCreateSpatialTransformerDescriptor(&desc_)); + } + ~ScopedSpatialTransformerDescriptor() { + PADDLE_ENFORCE(dynload::cudnnDestroySpatialTransformerDescriptor(desc_)); + } + + template + inline cudnnSpatialTransformerDescriptor_t descriptor(const int nbDims, + const int dimA[]) { + PADDLE_ENFORCE(dynload::cudnnSetSpatialTransformerNdDescriptor( + desc_, CUDNN_SAMPLER_BILINEAR, CudnnDataType::type, nbDims, dimA)); + return desc_; + } + + private: + cudnnSpatialTransformerDescriptor_t desc_; + DISABLE_COPY_AND_ASSIGN(ScopedSpatialTransformerDescriptor); +}; + inline bool CanCUDNNBeUsed(const framework::ExecutionContext& ctx) { bool use_cudnn = ctx.Attr("use_cudnn"); use_cudnn &= paddle::platform::is_gpu_place(ctx.GetPlace()); diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index b0de636de46451c8b05546fdbff142f984c2bb43..924810bd61841139bc1849a000aaa57b07b71f0c 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -32,23 +32,25 @@ platform::DeviceContext* DeviceContextPool::Get(const platform::Place& place) { "'Place' is not supported, Please re-compile with WITH_GPU " "option"); } - return it->second.get(); + return it->second.get().get(); } -const std::vector -DeviceContextPool::GetAllDeviceContexts() const { - std::vector all_device_ctx; - all_device_ctx.reserve(device_contexts_.size()); - for (auto& dev_ctx : device_contexts_) { - all_device_ctx.emplace_back(dev_ctx.second.get()); - } - return all_device_ctx; +template +inline void EmplaceDeviceContext( + std::map>>* + map_ptr, + platform::Place p) { + using PtrType = std::unique_ptr; + map_ptr->emplace(p, std::async(std::launch::deferred, [=] { + // lazy evaluation. 
i.e., only create device context at + // first `Get` + return PtrType(new DevCtx(boost::get(p))); + })); } DeviceContextPool::DeviceContextPool( const std::vector& places) { PADDLE_ENFORCE_GT(places.size(), 0); - using PtrType = std::unique_ptr; std::set set; for (auto& p : places) { set.insert(p); @@ -57,16 +59,13 @@ DeviceContextPool::DeviceContextPool( for (auto& p : set) { if (platform::is_cpu_place(p)) { #ifdef PADDLE_WITH_MKLDNN - device_contexts_.emplace( - p, PtrType(new MKLDNNDeviceContext(boost::get(p)))); + EmplaceDeviceContext(&device_contexts_, p); #else - device_contexts_.emplace( - p, PtrType(new CPUDeviceContext(boost::get(p)))); + EmplaceDeviceContext(&device_contexts_, p); #endif } else if (platform::is_gpu_place(p)) { #ifdef PADDLE_WITH_CUDA - device_contexts_.emplace( - p, PtrType(new CUDADeviceContext(boost::get(p)))); + EmplaceDeviceContext(&device_contexts_, p); #else PADDLE_THROW( "'CUDAPlace' is not supported, Please re-compile with WITH_GPU " @@ -74,9 +73,8 @@ DeviceContextPool::DeviceContextPool( #endif } else if (platform::is_cuda_pinned_place(p)) { #ifdef PADDLE_WITH_CUDA - device_contexts_.emplace( - p, - PtrType(new CUDAPinnedDeviceContext(boost::get(p)))); + EmplaceDeviceContext( + &device_contexts_, p); #else PADDLE_THROW( "'CUDAPlace' is not supported, Please re-compile with WITH_GPU " diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index 942e13a724339dc85ed1fc72c11e208ddce36dbb..0240b9380f3213b2a030061007e04abe1d73c6e3 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include // NOLINT #include #include // NOLINT #include @@ -223,9 +224,6 @@ class DeviceContextPool { /*! \brief Return handle of single device context. */ platform::DeviceContext* Get(const platform::Place& place); - /*! \brief Return all the device contexts. 
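The `std::async(std::launch::deferred, ...)` change above makes device-context construction lazy: the pool stores a factory per place and pays the construction cost only on the first `Get`. A small Python sketch of the same pattern (illustrative only, not the actual C++ pool):

```python
class LazyDeviceContextPool:
    """Per-place contexts are built on first get() and cached afterwards."""

    def __init__(self, place_factories):
        # place -> zero-argument factory; nothing is constructed yet.
        self._factories = dict(place_factories)
        self._contexts = {}

    def get(self, place):
        # Lazy evaluation: only create the device context at the first get().
        if place not in self._contexts:
            self._contexts[place] = self._factories[place]()
        return self._contexts[place]

pool = LazyDeviceContextPool({'cpu': object, 'gpu:0': object})
ctx = pool.get('cpu')          # constructed here
assert ctx is pool.get('cpu')  # reused afterwards
```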
*/ - const std::vector GetAllDeviceContexts() const; - template const typename DefaultDeviceContextType::TYPE* GetByPlace( const Place& place) { @@ -237,7 +235,8 @@ class DeviceContextPool { private: static DeviceContextPool* pool; - std::map> device_contexts_; + std::map>> + device_contexts_; DISABLE_COPY_AND_ASSIGN(DeviceContextPool); }; diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index e6353f67ef118072a2d8e49111e8ecc486589998..d3d754b6f58d25a9dfacafaf55d50b353a71ee6d 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -65,44 +65,51 @@ extern void EnforceCUDNNLoaded(const char* fn_name); * include all needed cudnn functions in HPPL * different cudnn version has different interfaces **/ -#define CUDNN_DNN_ROUTINE_EACH(__macro) \ - __macro(cudnnSetTensor4dDescriptor); \ - __macro(cudnnSetTensor4dDescriptorEx); \ - __macro(cudnnSetTensorNdDescriptor); \ - __macro(cudnnGetTensorNdDescriptor); \ - __macro(cudnnGetConvolutionNdForwardOutputDim); \ - __macro(cudnnGetConvolutionForwardAlgorithm); \ - __macro(cudnnCreateTensorDescriptor); \ - __macro(cudnnDestroyTensorDescriptor); \ - __macro(cudnnCreateFilterDescriptor); \ - __macro(cudnnSetFilter4dDescriptor); \ - __macro(cudnnSetFilterNdDescriptor); \ - __macro(cudnnGetFilterNdDescriptor); \ - __macro(cudnnSetPooling2dDescriptor); \ - __macro(cudnnSetPoolingNdDescriptor); \ - __macro(cudnnGetPoolingNdDescriptor); \ - __macro(cudnnDestroyFilterDescriptor); \ - __macro(cudnnCreateConvolutionDescriptor); \ - __macro(cudnnCreatePoolingDescriptor); \ - __macro(cudnnDestroyPoolingDescriptor); \ - __macro(cudnnSetConvolution2dDescriptor); \ - __macro(cudnnDestroyConvolutionDescriptor); \ - __macro(cudnnSetConvolutionNdDescriptor); \ - __macro(cudnnGetConvolutionNdDescriptor); \ - __macro(cudnnDeriveBNTensorDescriptor); \ - __macro(cudnnCreate); \ - __macro(cudnnDestroy); \ - __macro(cudnnSetStream); \ - __macro(cudnnActivationForward); \ - __macro(cudnnConvolutionForward); \ - __macro(cudnnConvolutionBackwardBias); \ - __macro(cudnnGetConvolutionForwardWorkspaceSize); \ - __macro(cudnnTransformTensor); \ - __macro(cudnnPoolingForward); \ - __macro(cudnnPoolingBackward); \ - __macro(cudnnSoftmaxBackward); \ - __macro(cudnnSoftmaxForward); \ - __macro(cudnnGetVersion); \ +#define CUDNN_DNN_ROUTINE_EACH(__macro) \ + __macro(cudnnSetTensor4dDescriptor); \ + __macro(cudnnSetTensor4dDescriptorEx); \ + __macro(cudnnSetTensorNdDescriptor); \ + __macro(cudnnGetTensorNdDescriptor); \ + __macro(cudnnGetConvolutionNdForwardOutputDim); \ + __macro(cudnnGetConvolutionForwardAlgorithm); \ + __macro(cudnnCreateTensorDescriptor); \ + __macro(cudnnDestroyTensorDescriptor); \ + __macro(cudnnCreateFilterDescriptor); \ + __macro(cudnnSetFilter4dDescriptor); \ + __macro(cudnnSetFilterNdDescriptor); \ + __macro(cudnnGetFilterNdDescriptor); \ + __macro(cudnnSetPooling2dDescriptor); \ + __macro(cudnnSetPoolingNdDescriptor); \ + __macro(cudnnGetPoolingNdDescriptor); \ + __macro(cudnnDestroyFilterDescriptor); \ + __macro(cudnnCreateConvolutionDescriptor); \ + __macro(cudnnCreatePoolingDescriptor); \ + __macro(cudnnDestroyPoolingDescriptor); \ + __macro(cudnnSetConvolution2dDescriptor); \ + __macro(cudnnDestroyConvolutionDescriptor); \ + __macro(cudnnSetConvolutionNdDescriptor); \ + __macro(cudnnGetConvolutionNdDescriptor); \ + __macro(cudnnDeriveBNTensorDescriptor); \ + __macro(cudnnCreateSpatialTransformerDescriptor); \ + __macro(cudnnSetSpatialTransformerNdDescriptor); \ + 
__macro(cudnnDestroySpatialTransformerDescriptor); \ + __macro(cudnnSpatialTfGridGeneratorForward); \ + __macro(cudnnSpatialTfGridGeneratorBackward); \ + __macro(cudnnSpatialTfSamplerForward); \ + __macro(cudnnSpatialTfSamplerBackward); \ + __macro(cudnnCreate); \ + __macro(cudnnDestroy); \ + __macro(cudnnSetStream); \ + __macro(cudnnActivationForward); \ + __macro(cudnnConvolutionForward); \ + __macro(cudnnConvolutionBackwardBias); \ + __macro(cudnnGetConvolutionForwardWorkspaceSize); \ + __macro(cudnnTransformTensor); \ + __macro(cudnnPoolingForward); \ + __macro(cudnnPoolingBackward); \ + __macro(cudnnSoftmaxBackward); \ + __macro(cudnnSoftmaxForward); \ + __macro(cudnnGetVersion); \ __macro(cudnnGetErrorString); CUDNN_DNN_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 5f15a29f4c3e9b1412912fe4723642d1ede60346..7c7b14df6618bd636f3636612486884b573309fb 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -821,6 +821,13 @@ All parameter, weight, gradient are variables in Paddle. [](BuildStrategy &self, bool b) { self.enable_data_balance_ = b; }) // FIXME(chengudo): enable_data_balance seems not important + .def_property("enable_sequential_execution", + [](const BuildStrategy &self) { + return self.enable_sequential_execution_; + }, + [](BuildStrategy &self, bool b) { + self.enable_sequential_execution_ = b; + }) .def_property( "fuse_elewise_add_act_ops", [](const BuildStrategy &self) { diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 5a71382fb14b64989502c34d8ac0aa13c62bc7d0..d7676f89ab5e781f910f98d03e72d5f7c1023a9a 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -147,13 +147,11 @@ function cmake_gen() { -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} -DCUDNN_ROOT=/usr/ -DWITH_TESTING=${WITH_TESTING:-ON} - -DWITH_FAST_BUNDLE_TEST=ON -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DWITH_CONTRIB=${WITH_CONTRIB:-ON} - -DWITH_INFERENCE=${WITH_INFERENCE:-ON} -DWITH_INFERENCE_API_TEST=${WITH_INFERENCE_API_TEST:-ON} -DINFERENCE_DEMO_INSTALL_DIR=${INFERENCE_DEMO_INSTALL_DIR} -DWITH_ANAKIN=${WITH_ANAKIN:-OFF} @@ -181,12 +179,10 @@ EOF -DWITH_PYTHON=${WITH_PYTHON:-ON} \ -DCUDNN_ROOT=/usr/ \ -DWITH_TESTING=${WITH_TESTING:-ON} \ - -DWITH_FAST_BUNDLE_TEST=ON \ -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake \ -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ -DWITH_CONTRIB=${WITH_CONTRIB:-ON} \ - -DWITH_INFERENCE=${WITH_INFERENCE:-ON} \ -DWITH_INFERENCE_API_TEST=${WITH_INFERENCE_API_TEST:-ON} \ -DINFERENCE_DEMO_INSTALL_DIR=${INFERENCE_DEMO_INSTALL_DIR} \ -DWITH_ANAKIN=${WITH_ANAKIN:-OFF} \ @@ -653,7 +649,7 @@ function gen_capi_package() { function gen_fluid_lib() { mkdir -p ${PADDLE_ROOT}/build cd ${PADDLE_ROOT}/build - if [[ ${WITH_C_API:-OFF} == "OFF" && ${WITH_INFERENCE:-ON} == "ON" ]] ; then + if [[ ${WITH_C_API:-OFF} == "OFF" ]] ; then cat <
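The `enable_sequential_execution` property bound in pybind above can then be toggled from Python. A hedged usage sketch, assuming the usual fluid 1.x `BuildStrategy`/`ParallelExecutor` workflow (names outside this diff are assumptions):

```python
import paddle.fluid as fluid

build_strategy = fluid.BuildStrategy()
# Run ops in program order; handy when chasing non-determinism in
# multi-device runs, at some cost in overlap.
build_strategy.enable_sequential_execution = True

# The strategy is then handed to the executor, e.g. for an existing
# `loss` variable:
#   exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name,
#                                build_strategy=build_strategy)
```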