diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9a895a19c4625e066403c65b0ede840951bd249a..eabacbf7ccd02252bc5e1bc19391ae75722242ee 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -67,7 +67,6 @@ option(WITH_DISTRIBUTE "Compile with distributed support" OFF)
 option(USE_EIGEN_FOR_BLAS "Use matrix multiplication in Eigen" OFF)
 option(EIGEN_USE_THREADS "Compile with multi-threaded Eigen" OFF)
 option(WITH_ARM_FP16 "Use half precision support on armv8.2-a cpu" OFF)
-option(WITH_FAST_BUNDLE_TEST "Bundle tests that can be run in a single process together to reduce launch overhead" OFF)
 option(WITH_CONTRIB "Compile the third-party contributation" OFF)
 option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." OFF)
 option(WITH_ANAKIN "Compile with Anakin library" OFF)
diff --git a/cmake/configure.cmake b/cmake/configure.cmake
index e9852f00b1835adec31373f58ac538f9685251e0..7f5771e561f6cc419fc9b3094174645ac432546e 100644
--- a/cmake/configure.cmake
+++ b/cmake/configure.cmake
@@ -50,11 +50,7 @@ if(NOT WITH_PROFILER)
 endif(NOT WITH_PROFILER)
 
 if(NOT CMAKE_CROSSCOMPILING)
-    if(WITH_AVX AND AVX512F_FOUND)
-        set(SIMD_FLAG ${AVX512F_FLAG})
-    elseif(WITH_AVX AND AVX2_FOUND)
-        set(SIMD_FLAG ${AVX2_FLAG})
-    elseif(WITH_AVX AND AVX_FOUND)
+    if(WITH_AVX AND AVX_FOUND)
         set(SIMD_FLAG ${AVX_FLAG})
     elseif(SSE3_FOUND)
         set(SIMD_FLAG ${SSE3_FLAG})
diff --git a/cmake/simd.cmake b/cmake/simd.cmake
index 3eacf4d86aa0385eddb690d72e85e3384929bb99..566dc75fda019eb66759eb403f60e16f18cffef1 100644
--- a/cmake/simd.cmake
+++ b/cmake/simd.cmake
@@ -89,7 +89,9 @@ CHECK_CXX_SOURCE_RUNS("
 #include <immintrin.h>
 int main()
 {
-  __m512i a = _mm512_undefined_epi32();
+  __m512i a = _mm512_set_epi32 (-1, 2, -3, 4, -1, 2, -3, 4,
+                                13, -5, 6, -7, 9, 2, -6, 3);
+  __m512i result = _mm512_abs_epi32 (a);
   return 0;
 }" AVX512F_FOUND)
diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 3bbe7c2b8cd60be93cbe71cb1cdfe1b85aa7e461..bda2069d14307e4d2f9a0e369ef8b9d16911fdb6 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -67,8 +67,8 @@ paddle.fluid.layers.conv3d ArgSpec(args=['input', 'num_filters', 'filter_size',
 paddle.fluid.layers.sequence_pool ArgSpec(args=['input', 'pool_type', 'is_test'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.layers.sequence_softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None))
 paddle.fluid.layers.softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(True, None))
-paddle.fluid.layers.pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None))
-paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None))
+paddle.fluid.layers.pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True))
+paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True))
 paddle.fluid.layers.batch_norm ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False))
 paddle.fluid.layers.beam_search_decode ArgSpec(args=['ids', 'scores', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.conv2d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None))
@@ -103,7 +103,7 @@ paddle.fluid.layers.beam_search ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 's
 paddle.fluid.layers.row_conv ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.layers.multiplex ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.layer_norm ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None))
-paddle.fluid.layers.softmax_with_cross_entropy ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100))
+paddle.fluid.layers.softmax_with_cross_entropy ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode'], varargs=None, keywords=None, defaults=(False, -100, False))
 paddle.fluid.layers.smooth_l1 ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None))
 paddle.fluid.layers.one_hot ArgSpec(args=['input', 'depth'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.autoincreased_step_counter ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1))
@@ -174,9 +174,11 @@ paddle.fluid.layers.mean ArgSpec(args=['x', 'name'], varargs=None, keywords=None
 paddle.fluid.layers.mul ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None))
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=['x', 'label', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.maxout ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.affine_grid ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_reverse ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.affine_channel ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None))
 paddle.fluid.layers.hash ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None))
+paddle.fluid.layers.grid_sampler ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.log_loss ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None))
 paddle.fluid.layers.add_position_encoding ArgSpec(args=['input', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
@@ -187,6 +189,7 @@ paddle.fluid.layers.batch ArgSpec(args=['reader', 'batch_size'], varargs=None, k
 paddle.fluid.layers.double_buffer ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None))
 paddle.fluid.layers.random_data_generator ArgSpec(args=['low', 'high', 'shapes', 'lod_levels', 'for_parallel'], varargs=None, keywords=None, defaults=(True,))
 paddle.fluid.layers.py_reader ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True))
+paddle.fluid.layers.create_py_reader_by_data ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True))
 paddle.fluid.layers.Preprocessor.__init__ ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.Preprocessor.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
 paddle.fluid.layers.Preprocessor.inputs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt
index aa6b7db5562f8596b1b30a16c0f08fcc433cfcd7..d6b5ad4570c1d8402dedb8596cc75d9eae5a91c7 100644
--- a/paddle/fluid/framework/details/CMakeLists.txt
+++ b/paddle/fluid/framework/details/CMakeLists.txt
@@ -1,5 +1,6 @@
 cc_library(var_handle SRCS var_handle.cc DEPS place framework_proto node)
 cc_library(op_handle_base SRCS op_handle_base.cc DEPS var_handle device_context lod_tensor)
+cc_library(op_graph_view SRCS op_graph_view.cc DEPS op_handle_base)
 cc_library(scale_loss_grad_op_handle SRCS scale_loss_grad_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory)
 cc_library(fetch_op_handle SRCS fetch_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory)
 cc_library(computation_op_handle SRCS computation_op_handle.cc DEPS framework_proto scope place operator op_registry)
@@ -30,20 +31,25 @@ cc_library(data_balance_op_handle SRCS data_balance_op_handle.cc DEPS op_handle_
 cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor)
 cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base scope)
 
-if(WITH_GPU)
+cc_library(modify_op_lock_and_record_event_pass SRCS modify_op_lock_and_record_event_pass.cc DEPS computation_op_handle op_graph_view multi_devices_helper)
+
+if (WITH_GPU)
 cc_library(reference_count_pass SRCS reference_count_pass.cc DEPS computation_op_handle scale_loss_grad_op_handle rpc_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle data_balance_op_handle graph graph_helper pass)
 endif()
 
+cc_library(sequential_execution_pass SRCS sequential_execution_pass.cc DEPS graph graph_helper pass)
+
 cc_library(multi_devices_graph_pass SRCS multi_devices_graph_pass.cc DEPS multi_devices_helper computation_op_handle scale_loss_grad_op_handle rpc_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle data_balance_op_handle fused_broadcast_op_handle)
 
-if(WITH_GPU)
-  cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS graph framework_proto reference_count_pass)
-else()
-  cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS graph framework_proto)
+set(SSA_GRAPH_EXECUTOR_DEPS graph framework_proto sequential_execution_pass modify_op_lock_and_record_event_pass)
+if (WITH_GPU)
+  list(APPEND SSA_GRAPH_EXECUTOR_DEPS reference_count_pass)
 endif()
+cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS ${SSA_GRAPH_EXECUTOR_DEPS})
+
 cc_library(threaded_ssa_graph_executor SRCS threaded_ssa_graph_executor.cc DEPS fetch_op_handle ssa_graph_executor scope simple_threadpool device_context)
diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc
index fefd27fc86fb8dce3311fa580d90f518906dd862..48f94a1f05614d4b797562ac67cdb9828fd0456e 100644
--- a/paddle/fluid/framework/details/build_strategy.cc
+++ b/paddle/fluid/framework/details/build_strategy.cc
@@ -16,6 +16,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/details/multi_devices_graph_check_pass.h"
 #include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h"
+#include "paddle/fluid/framework/details/sequential_execution_pass.h"
 #include "paddle/fluid/framework/ir/graph.h"
 #include "paddle/fluid/framework/ir/graph_viz_pass.h"
 
@@ -27,6 +28,10 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
  public:
   explicit ParallelExecutorPassBuilder(const BuildStrategy &strategy)
       : ir::PassBuilder(), strategy_(strategy) {
+    if (strategy_.enable_sequential_execution_) {
+      AppendPass("sequential_execution_pass");
+    }
+
     // Add a graph viz pass to record a graph.
     if (!strategy_.debug_graphviz_path_.empty()) {
       auto viz_pass = AppendPass("graph_viz_pass");
@@ -64,6 +69,10 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
 
     // Verify that the graph is correct for multi-device executor.
     AppendPass("multi_devices_check_pass");
+
+    if (strategy_.remove_unnecessary_lock_) {
+      AppendPass("modify_op_lock_and_record_event_pass");
+    }
   }
 
  private:
@@ -110,6 +119,11 @@ std::unique_ptr<ir::Graph> BuildStrategy::Apply(
       pass->Erase("nccl_ctxs");
       pass->SetNotOwned<platform::NCCLContextMap>("nccl_ctxs", nctx);
 #endif
+    } else if (pass->Type() == "sequential_execution_pass") {
+      pass->Erase(kAllOpDescs);
+      pass->Set<const std::vector<OpDesc *>>(
+          kAllOpDescs,
+          new std::vector<OpDesc *>(main_program.Block(0).AllOps()));
     }
     graph = pass->Apply(std::move(graph));
   }
@@ -125,3 +139,5 @@ USE_PASS(multi_batch_merge_pass);
 USE_PASS(multi_devices_pass);
 USE_PASS(multi_devices_check_pass);
 USE_PASS(multi_devices_print_pass);
+USE_PASS(sequential_execution_pass);
+USE_PASS(modify_op_lock_and_record_event_pass);
diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h
index f3ffaf6ecd7c4dd99c40fe58ba88c0cbdc14bde7..6c7b54db8f610aa34cd51dcbc13063290cae3ac0 100644
--- a/paddle/fluid/framework/details/build_strategy.h
+++ b/paddle/fluid/framework/details/build_strategy.h
@@ -69,8 +69,12 @@ struct BuildStrategy {
 
   bool enable_data_balance_{false};
 
+  bool enable_sequential_execution_{false};
+
   bool fuse_broadcast_op_{false};
 
+  bool remove_unnecessary_lock_{false};
+
   // User normally doesn't need to call this API.
   // The PassBuilder allows for more customized insert, remove of passes
   // from python side.
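Note: the two new BuildStrategy members above are plain flags read by ParallelExecutorPassBuilder when it assembles the pass pipeline. A minimal C++ sketch of how a caller might toggle them (not part of this patch; the helper function is hypothetical and the surrounding ParallelExecutor setup is elided):

    #include "paddle/fluid/framework/details/build_strategy.h"

    namespace details = paddle::framework::details;

    // Hypothetical helper: set the new flags before the strategy is handed
    // to BuildStrategy::Apply() / ParallelExecutor.
    void ConfigureStrategy(details::BuildStrategy *strategy) {
      // Prepends "sequential_execution_pass", forcing ops to run in program
      // order; the pass requires the kAllOpDescs attribute, which Apply()
      // injects from main_program.Block(0).AllOps().
      strategy->enable_sequential_execution_ = true;
      // Appends "modify_op_lock_and_record_event_pass", which lets GPU
      // compute ops skip the device lock and CUDA event recording when all
      // of their consumers run on the same place.
      strategy->remove_unnecessary_lock_ = true;
    }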
diff --git a/paddle/fluid/framework/details/computation_op_handle.cc b/paddle/fluid/framework/details/computation_op_handle.cc
index f9bbfe0016ce0ea0d15a83cb532c44518549b8ad..7ad1e40c600c6e70cea822fac777ff20163078e6 100644
--- a/paddle/fluid/framework/details/computation_op_handle.cc
+++ b/paddle/fluid/framework/details/computation_op_handle.cc
@@ -29,9 +29,15 @@ ComputationOpHandle::ComputationOpHandle(ir::Node *node, Scope *scope,
 void ComputationOpHandle::RunImpl() {
   WaitInputVarGenerated(place_);
 
-  this->RunAndRecordEvent([this] {
+  auto run_func = [this]() {
     op_->Run(*scope_->FindVar(kLocalExecScopeName)->Get<Scope *>(), place_);
-  });
+  };
+
+  if (is_lock_and_record_event_free_) {
+    run_func();
+  } else {
+    this->RunAndRecordEvent(run_func);
+  }
 }
 
 bool ComputationOpHandle::NeedWait(VarHandleBase *in_var) {
diff --git a/paddle/fluid/framework/details/computation_op_handle.h b/paddle/fluid/framework/details/computation_op_handle.h
index e98f1ab148db083ac63a1afd43e334fbfae62539..662a91d6b4dfcfed563fdf2e46c22f83f90b40af 100644
--- a/paddle/fluid/framework/details/computation_op_handle.h
+++ b/paddle/fluid/framework/details/computation_op_handle.h
@@ -36,6 +36,8 @@ struct ComputationOpHandle : public OpHandleBase {
 
   const platform::Place &GetPlace() const { return place_; }
 
+  void SetLockAndRecordEventFree(bool b) { is_lock_and_record_event_free_ = b; }
+
  protected:
   void RunImpl() override;
 
@@ -45,6 +47,7 @@ struct ComputationOpHandle : public OpHandleBase {
   std::unique_ptr<OperatorBase> op_;
   Scope *scope_;
   platform::Place place_;
+  bool is_lock_and_record_event_free_{false};
 };
 }  // namespace details
 }  // namespace framework
diff --git a/paddle/fluid/framework/details/execution_strategy.h b/paddle/fluid/framework/details/execution_strategy.h
index 5183be878eb49cccc68603c3fdd8023be5578036..15c496130c2b6c7643ff96661be09e5ac4870344 100644
--- a/paddle/fluid/framework/details/execution_strategy.h
+++ b/paddle/fluid/framework/details/execution_strategy.h
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #pragma once
+#include <cstddef>  // for size_t
 
 namespace paddle {
 namespace framework {
@@ -26,6 +27,7 @@ struct ExecutionStrategy {
   bool allow_op_delay_{false};
   size_t num_iteration_per_drop_scope_{100};
   ExecutorType type_{kDefault};
+  bool dry_run_{false};
 };
 
 }  // namespace details
diff --git a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
index 98fc390e72fab3701538fd6f974460fa5114fdb0..2b2329b9698908fdbe3385f1d555d756c47fc5c0 100644
--- a/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc
@@ -128,7 +128,9 @@ void FastThreadedSSAGraphExecutor::RunOpAsync(
   size_t complete = 0;
   while (op_to_run != nullptr) {
     try {
-      op_to_run->Run(strategy_.use_cuda_);
+      if (LIKELY(!strategy_.dry_run_)) {
+        op_to_run->Run(strategy_.use_cuda_);
+      }
       ++complete;
     } catch (...) {
       exception_.Catch(std::current_exception());
diff --git a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc b/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc
new file mode 100644
index 0000000000000000000000000000000000000000..169ce3ae7ca497e40d99b1c16633e35e1e4f1009
--- /dev/null
+++ b/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.h"
+#include "paddle/fluid/framework/details/computation_op_handle.h"
+#include "paddle/fluid/framework/details/multi_devices_helper.h"
+#include "paddle/fluid/framework/details/op_graph_view.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+static bool IsLockAndRecordEventFreeComputationOpHandle(
+    ComputationOpHandle *op, const OpGraphView &graph_view) {
+  if (!platform::is_gpu_place(op->GetPlace())) return false;
+  for (auto &pending_op : graph_view.PendingOps(op)) {
+    auto *tmp = dynamic_cast<ComputationOpHandle *>(pending_op);
+    if (tmp == nullptr || !(tmp->GetPlace() == op->GetPlace())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+std::unique_ptr<ir::Graph> ModifyOpLockAndRecordEventPass::ApplyImpl(
+    std::unique_ptr<ir::Graph> ir_graph) const {
+  auto &all_ops = ir_graph->Get<GraphOps>(kGraphOps);
+  OpGraphView graph_view(all_ops);
+  for (auto &op : all_ops) {
+    auto *compute_op = dynamic_cast<ComputationOpHandle *>(op.get());
+    if (compute_op == nullptr) continue;
+    bool is_lock_and_record_event_free =
+        IsLockAndRecordEventFreeComputationOpHandle(compute_op, graph_view);
+    compute_op->SetLockAndRecordEventFree(is_lock_and_record_event_free);
+    if (is_lock_and_record_event_free) {
+      VLOG(10) << "Set is_lock_and_record_event_free be true in op "
+               << compute_op->DebugString();
+    }
+  }
+  return ir_graph;
+}
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
+
+REGISTER_PASS(modify_op_lock_and_record_event_pass,
+              paddle::framework::details::ModifyOpLockAndRecordEventPass);
diff --git a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.h b/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.h
new file mode 100644
index 0000000000000000000000000000000000000000..b54e1b318be95e1e0abf6830f8c918895df02718
--- /dev/null
+++ b/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/fluid/framework/ir/graph.h"
+#include "paddle/fluid/framework/ir/pass.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+class ModifyOpLockAndRecordEventPass : public ir::Pass {
+ protected:
+  std::unique_ptr<ir::Graph> ApplyImpl(
+      std::unique_ptr<ir::Graph> graph) const override;
+};
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/op_graph_view.cc b/paddle/fluid/framework/details/op_graph_view.cc
new file mode 100644
index 0000000000000000000000000000000000000000..65dafd376f7c687410270e35f105ff595fe78f59
--- /dev/null
+++ b/paddle/fluid/framework/details/op_graph_view.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/framework/details/op_graph_view.h"
+#include <queue>
+#include <utility>
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+OpGraphView::OpGraphView(
+    const std::vector<std::unique_ptr<OpHandleBase>> &ops) {
+  Build(ops);
+}
+
+void OpGraphView::Build(const std::vector<std::unique_ptr<OpHandleBase>> &ops) {
+  for (auto &op : ops) {
+    preceding_ops_[op.get()];
+    pending_ops_[op.get()];
+    for (auto &var : op->Outputs()) {
+      for (auto &pending_op : var->PendingOps()) {
+        preceding_ops_[pending_op].insert(op.get());
+        pending_ops_[op.get()].insert(pending_op);
+      }
+    }
+  }
+  PADDLE_ENFORCE(
+      preceding_ops_.size() == ops.size() && pending_ops_.size() == ops.size(),
+      "There are duplicate ops in graph.");
+}
+
+size_t OpGraphView::OpNumber() const { return preceding_ops_.size(); }
+
+std::unordered_set<OpHandleBase *> OpGraphView::AllOps() const {
+  std::unordered_set<OpHandleBase *> ret;
+  for (auto &pair : preceding_ops_) {
+    ret.insert(pair.first);
+  }
+  return ret;
+}
+
+bool OpGraphView::HasOp(OpHandleBase *op) const {
+  return preceding_ops_.count(op) != 0;
+}
+
+void OpGraphView::EnforceHasOp(OpHandleBase *op) const {
+  PADDLE_ENFORCE(HasOp(op), "Cannot find op %s in OpGraphView",
+                 op == nullptr ? "nullptr" : op->DebugString());
+}
+
+const std::unordered_set<OpHandleBase *> &OpGraphView::PrecedingOps(
+    OpHandleBase *op) const {
+  EnforceHasOp(op);
+  return preceding_ops_.at(op);
+}
+
+const std::unordered_set<OpHandleBase *> &OpGraphView::PendingOps(
+    OpHandleBase *op) const {
+  EnforceHasOp(op);
+  return pending_ops_.at(op);
+}
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/op_graph_view.h b/paddle/fluid/framework/details/op_graph_view.h
new file mode 100644
index 0000000000000000000000000000000000000000..398c019be00a6ff5f5b39fdcbe97339341b1685b
--- /dev/null
+++ b/paddle/fluid/framework/details/op_graph_view.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <memory>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+#include "paddle/fluid/framework/details/op_handle_base.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+class OpGraphView {
+ public:
+  explicit OpGraphView(const std::vector<std::unique_ptr<OpHandleBase>> &ops);
+
+  size_t OpNumber() const;
+
+  std::unordered_set<OpHandleBase *> AllOps() const;
+
+  const std::unordered_set<OpHandleBase *> &PrecedingOps(
+      OpHandleBase *op) const;
+
+  const std::unordered_set<OpHandleBase *> &PendingOps(OpHandleBase *op) const;
+
+  bool HasOp(OpHandleBase *op) const;
+
+ private:
+  void Build(const std::vector<std::unique_ptr<OpHandleBase>> &ops);
+  void EnforceHasOp(OpHandleBase *op) const;
+
+  std::unordered_map<OpHandleBase *, std::unordered_set<OpHandleBase *>>
+      preceding_ops_;
+  std::unordered_map<OpHandleBase *, std::unordered_set<OpHandleBase *>>
+      pending_ops_;
+};
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/reference_count_op_handle.h b/paddle/fluid/framework/details/reference_count_op_handle.h
index fc479a4c4a1e7d5c824d3c202e0cccf743dd52c9..cc4ccfbdfc720284e683a8f3f59a4aa57a3a9eb1 100644
--- a/paddle/fluid/framework/details/reference_count_op_handle.h
+++ b/paddle/fluid/framework/details/reference_count_op_handle.h
@@ -51,7 +51,7 @@ class ReferenceCountOpHandle : public OpHandleBase {
     dev_ctx_ = static_cast<platform::CUDADeviceContext *>(
         platform::DeviceContextPool::Instance().Get(place));
     if (IsStreamGarabageCollector()) {
-      PADDLE_ENFORCE(cudaSetDevice(place.device));
+      platform::SetDeviceId(place.device);
       PADDLE_ENFORCE(cudaEventCreateWithFlags(&event_, cudaEventDisableTiming));
     }
 
@@ -61,7 +61,7 @@ class ReferenceCountOpHandle : public OpHandleBase {
   ~ReferenceCountOpHandle() {
     if (IsStreamGarabageCollector()) {
       auto gpu_place = boost::get<platform::CUDAPlace>(dev_ctx_->GetPlace());
-      PADDLE_ENFORCE(cudaSetDevice(gpu_place.device));
+      platform::SetDeviceId(gpu_place.device);
       PADDLE_ENFORCE(cudaEventDestroy(event_));
     }
   }
diff --git a/paddle/fluid/framework/details/reference_count_pass.cc b/paddle/fluid/framework/details/reference_count_pass.cc
index 2d1f688d64ece3322e253b0c070264b9eb73d678..0b994ced7f751f056fec076e3dea8d14d0bed991 100644
--- a/paddle/fluid/framework/details/reference_count_pass.cc
+++ b/paddle/fluid/framework/details/reference_count_pass.cc
@@ -43,6 +43,23 @@ static ComputationOpHandle *FindNextComputationOpHandle(VarHandle *var_in) {
   return nullptr;
 }
 
+static void AddDependencyBetween(OpHandleBase *in, OpHandleBase *out,
+                                 ir::Graph *graph) {
+  auto it = std::find_if(
+      in->Outputs().begin(), in->Outputs().end(), [](VarHandleBase *var) {
+        return dynamic_cast<DummyVarHandle *>(var) != nullptr;
+      });
+
+  if (it != in->Outputs().end()) {
+    out->AddInput(*it);
+  } else {
+    auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
+    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
+    in->AddOutput(dep_var);
+    out->AddInput(dep_var);
+  }
+}
+
 std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
     std::unique_ptr<ir::Graph> graph) const {
   auto &ref_cnts = Get<DeviceReferenceCountMap>(kGlobalReferenceCount);
@@ -133,12 +150,7 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
         auto *ref_cnt_handle = new ReferenceCountOpHandle(
             ref_cnt_node, next_compute_op->GetScope(), place, {var_name},
             gcs[place.device].get(), cur_ref_cnts[place.device].get());
-        if (next_compute_op->Outputs().empty()) {
-          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
-          next_compute_op->AddOutput(dep_var);
-          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
-        }
-        ref_cnt_handle->AddInput(next_compute_op->Outputs().front());
+        AddDependencyBetween(next_compute_op, ref_cnt_handle, graph.get());
         compute_ref_cnt_map[next_compute_op].reset(ref_cnt_handle);
       }
     }
@@ -160,12 +172,7 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
       auto *ref_cnt_handle = new ReferenceCountOpHandle(
           ref_cnt_node, compute_op->GetScope(), place, in_var_names,
          gcs[place.device].get(), cur_ref_cnts[place.device].get());
-      if (compute_op->Outputs().empty()) {
-        auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
-        compute_op->AddOutput(dep_var);
-        graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
-      }
-      ref_cnt_handle->AddInput(compute_op->Outputs().front());
+      AddDependencyBetween(compute_op, ref_cnt_handle, graph.get());
       compute_ref_cnt_map[compute_op].reset(ref_cnt_handle);
     }
diff --git a/paddle/fluid/framework/details/rpc_op_handle.cc b/paddle/fluid/framework/details/rpc_op_handle.cc
index 65df7f2d510bf4e3e930398182c6dd1eae89241f..dfa6c1ade1a024bb9087144d0e96fa5b0417f06a 100644
--- a/paddle/fluid/framework/details/rpc_op_handle.cc
+++ b/paddle/fluid/framework/details/rpc_op_handle.cc
@@ -29,22 +29,19 @@ RPCOpHandle::RPCOpHandle(ir::Node *node, const framework::OpDesc &op_desc,
       place_(place) {}
 
 void RPCOpHandle::RunImpl() {
-  // TODO(wuyi): need further analysis whether wait VarDummyHandle.
-  // Wait input done
   for (auto *in : inputs_) {
     auto &p = static_cast<VarHandle *>(in)->place_;
-    // FIXME(Yancey1989): need a better solution instead of use DebugString()
-    if (ir::IsControlDepVar(*in->Node())) {  // HACK
+    if (ir::IsControlDepVar(*in->Node())) {
       continue;
     }
     if (in->GeneratedOp()) {
       in->GeneratedOp()->RecordWaitEventOnCtx(dev_ctxes_.at(p));
     }
   }
-  auto &tmp_scope = local_scope_->FindVar(kLocalExecScopeName)->Get<Scope *>();
-  // FIXME(wuyi): can not use RunAndRecordEvent here, for it will cause dead
-  // lock.
-  op_->Run(*tmp_scope, place_);
+  this->RunAndRecordEvent([this] {
+    op_->Run(*local_scope_->FindVar(kLocalExecScopeName)->Get<Scope *>(),
+             place_);
+  });
 }
 
 std::string RPCOpHandle::Name() const { return name_; }
diff --git a/paddle/fluid/framework/details/sequential_execution_pass.cc b/paddle/fluid/framework/details/sequential_execution_pass.cc
new file mode 100644
index 0000000000000000000000000000000000000000..cc2c8bfef9f9f54c2e499467df0d22ce3f69d6b8
--- /dev/null
+++ b/paddle/fluid/framework/details/sequential_execution_pass.cc
@@ -0,0 +1,109 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/framework/details/sequential_execution_pass.h"
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+#include "paddle/fluid/framework/op_proto_maker.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+static bool IsSameOpDesc(OpDesc *op1, OpDesc *op2) {
+  return op1->Type() == op2->Type() && op1->Inputs() == op2->Inputs() &&
+         op1->Outputs() == op2->Outputs();
+}
+
+std::unique_ptr<ir::Graph> SequentialExecutionPass::ApplyImpl(
+    std::unique_ptr<ir::Graph> graph) const {
+  // FIXME(zjl): Insert dependencies between some distributed ops may cause
+  // the multi_devices_graph_pass fails. So we skip these ops here.
+  // Indeed, maybe we should not insert dependencies between these ops
+  // casually, which may cause deadlock easily.
+  // We should add more skipped distributed ops when found errors in
+  // multi_devices_graph_pass
+  static std::unordered_set<std::string> skip_dist_ops{
+      "send", "recv", "send_barrier", "fetch_barrier"};
+
+  auto &ops = Get<const std::vector<OpDesc *>>(kAllOpDescs);
+  std::vector<ir::Node *> op_node_list;
+  op_node_list.reserve(ops.size());
+
+  std::unordered_map<ir::Node *, size_t> op_deps;
+  std::unordered_map<ir::Node *, std::unordered_set<ir::Node *>> pending_ops;
+  std::unordered_set<ir::Node *> ready_ops;
+
+  for (ir::Node *node : graph->Nodes()) {
+    if (!node->IsOp()) continue;
+    std::unordered_set<ir::Node *> preceding_ops;
+    for (auto *in : node->inputs) {
+      PADDLE_ENFORCE(in->IsVar(),
+                     "Preceding Node of Op Nodes must be Var Node");
+      if (in->inputs.empty()) continue;
+      PADDLE_ENFORCE(in->inputs.size() == 1 && in->inputs[0]->IsOp(),
+                     "Preceding Op Node of Var Node must be unique");
+      preceding_ops.insert(in->inputs[0]);
+      pending_ops[in->inputs[0]].insert(node);
+    }
+    op_deps[node] = preceding_ops.size();
+    if (preceding_ops.empty()) {
+      ready_ops.insert(node);
+    }
+  }
+
+  for (auto *op_desc : ops) {
+    ir::Node *found_node = nullptr;
+    for (auto *node : ready_ops) {
+      if (IsSameOpDesc(op_desc, node->Op())) {
+        PADDLE_ENFORCE(found_node == nullptr,
+                       "Found multiple op_desc in graph: %s", op_desc->Type());
+        found_node = node;
+      }
+    }
+
+    PADDLE_ENFORCE_NOT_NULL(found_node, "Cannot find op_desc in graph: %s",
+                            op_desc->Type());
+    for (auto *pending_op : pending_ops[found_node]) {
+      if (--op_deps.at(pending_op) == 0) {
+        ready_ops.insert(pending_op);
+      }
+    }
+    ready_ops.erase(found_node);
+    if (skip_dist_ops.count(op_desc->Type()) == 0) {
+      op_node_list.push_back(found_node);
+    }
+  }
+
+  for (size_t i = 1; i < op_node_list.size(); ++i) {
+    auto *dep_var = graph->CreateControlDepVar();
+    op_node_list[i]->inputs.push_back(dep_var);
+    op_node_list[i - 1]->outputs.push_back(dep_var);
+    dep_var->outputs.push_back(op_node_list[i]);
+    dep_var->inputs.push_back(op_node_list[i - 1]);
+    VLOG(10) << "Add dependencies between " << op_node_list[i - 1]->Name()
+             << " and " << op_node_list[i]->Name();
+  }
+  return graph;
+}
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
+
+REGISTER_PASS(sequential_execution_pass,
+              paddle::framework::details::SequentialExecutionPass)
+    .RequirePassAttr(paddle::framework::details::kAllOpDescs);
diff --git a/paddle/fluid/framework/details/sequential_execution_pass.h b/paddle/fluid/framework/details/sequential_execution_pass.h
new file mode 100644
index 0000000000000000000000000000000000000000..a04c08bc2eb3bae797d648b30a22a5fee7ba0eaa
--- /dev/null
+++ b/paddle/fluid/framework/details/sequential_execution_pass.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/fluid/framework/ir/graph.h"
+#include "paddle/fluid/framework/ir/pass.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+constexpr char kAllOpDescs[] = "all_op_descs";
+
+class SequentialExecutionPass : public ir::Pass {
+ protected:
+  std::unique_ptr<ir::Graph> ApplyImpl(
+      std::unique_ptr<ir::Graph> graph) const override;
+};
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
index dc63effd1b7c8fe5bb3fc91058eb855e552d3926..2d2bdb604f2d08adbaa0b38d04b8e377b2e6ab6c 100644
--- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
@@ -211,7 +211,9 @@ void ThreadedSSAGraphExecutor::RunOp(
     if (VLOG_IS_ON(10)) {
       VLOG(10) << op << " " << op->Name() << " : " << op->DebugString();
     }
-    op->Run(strategy_.use_cuda_);
+    if (LIKELY(!strategy_.dry_run_)) {
+      op->Run(strategy_.use_cuda_);
+    }
     VLOG(10) << op << " " << op->Name() << " Done ";
     running_ops_--;
     ready_var_q->Extend(op->Outputs());
diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h
index dbb0b498d995a897b109bd4ef98521b2193276ed..5c0bc169eaf3f54596eb8e08b7bf80a82253c9b2 100644
--- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h
+++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h
@@ -48,7 +48,7 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor {
   // Use topological sort algorithm
   FeedFetchList Run(const std::vector<std::string> &fetch_tensors) override;
 
-  ~ThreadedSSAGraphExecutor() {}
+  ~ThreadedSSAGraphExecutor() final = default;
 
  private:
   void RunOp(const std::shared_ptr<BlockingQueue<VarHandleBase *>> &ready_var_q,
diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc
index b212666637a5289c9c6cd3585655deaeed8afd4b..8ed0ba1dfa68b3e22f370c3f2dd0f83c3e5506b0 100644
--- a/paddle/fluid/framework/executor.cc
+++ b/paddle/fluid/framework/executor.cc
@@ -85,8 +85,10 @@ Executor::Executor(const platform::Place& place) : place_(place) {}
 
 void Executor::Close() {
 #ifdef PADDLE_WITH_DISTRIBUTE
+  // TODO(typhoonzero): complete message will need to use real trainer_id,
+  // except 0.
   ::paddle::operators::distributed::RPCClient::GetInstance<
-      ::paddle::operators::distributed::GRPCClient>()
+      ::paddle::operators::distributed::GRPCClient>(0)
       ->SendComplete();
 #endif
 }
diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt
index ce006b7a3fbc16f3c9149933390969b14a46b484..28231a53bad50fe9f19cfe3e73c3dc09aa3762cf 100644
--- a/paddle/fluid/framework/ir/CMakeLists.txt
+++ b/paddle/fluid/framework/ir/CMakeLists.txt
@@ -41,6 +41,7 @@ pass_library(conv_bn_fuse_pass inference)
 pass_library(seqconv_eltadd_relu_fuse_pass inference)
 if(WITH_MKLDNN)
     pass_library(mkldnn_placement_pass base)
+    pass_library(depthwise_conv_mkldnn_pass base)
     pass_library(conv_bias_mkldnn_fuse_pass inference)
     pass_library(conv_relu_mkldnn_fuse_pass inference)
    pass_library(conv_elementwise_add_mkldnn_fuse_pass inference)
@@ -59,6 +60,7 @@ cc_test(graph_to_program_pass_test SRCS graph_to_program_pass_test.cc DEPS graph
 cc_test(test_graph_pattern_detector SRCS graph_pattern_detector_tester.cc DEPS graph_pattern_detector)
 cc_test(test_fc_fuse_pass SRCS fc_fuse_pass_tester.cc DEPS fc_fuse_pass framework_proto)
 if (WITH_MKLDNN)
+    cc_test(test_depthwise_conv_mkldnn_pass SRCS depthwise_conv_mkldnn_pass_tester.cc DEPS depthwise_conv_mkldnn_pass)
     cc_test(test_conv_relu_mkldnn_fuse_pass SRCS conv_relu_mkldnn_fuse_pass_tester.cc DEPS conv_relu_mkldnn_fuse_pass)
     cc_test(test_conv_elementwise_add_mkldnn_fuse_pass SRCS conv_elementwise_add_mkldnn_fuse_pass_tester.cc DEPS conv_elementwise_add_mkldnn_fuse_pass)
 endif ()
diff --git a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h b/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h
index b5de0d548713772e7ad41cfb6d8b3e9460683efb..fe585bd7c41bb32ae00462e989ab4c0051fc89a8 100644
--- a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h
+++ b/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h
@@ -31,7 +31,8 @@ class ConvReLUFusePass : public FusePassBase {
   virtual ~ConvReLUFusePass() {}
 
  protected:
-  std::unique_ptr<ir::Graph> ApplyImpl(std::unique_ptr<ir::Graph> graph) const;
+  std::unique_ptr<ir::Graph> ApplyImpl(
+      std::unique_ptr<ir::Graph> graph) const override;
 };
 
 }  // namespace ir
diff --git a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc
index 8f4bab25ed4919881baf19a961a52aa229e06a8f..19248b4dfee1da81d18cd2effac08ba68dde80fb 100644
--- a/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass_tester.cc
@@ -15,6 +15,7 @@
 #include "paddle/fluid/framework/ir/conv_relu_mkldnn_fuse_pass.h"
 
 #include <gtest/gtest.h>
+#include "paddle/fluid/framework/op_proto_maker.h"
 
 namespace paddle {
 namespace framework {
@@ -36,6 +37,8 @@ void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
     op->SetInput("X", inputs);
   }
   op->SetOutput("Out", outputs);
+  op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(),
+              static_cast<int>(OpRole::kForward));
 }
 
 // a->OP0->b
diff --git a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.cc b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.cc
new file mode 100644
index 0000000000000000000000000000000000000000..19056e18aa892dbc83dfbf7305b6ad8b6b6bc51c
--- /dev/null
+++ b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.cc
@@ -0,0 +1,58 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h"
+#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
+
+namespace paddle {
+namespace framework {
+namespace ir {
+
+#define GET_NODE(id, pattern)                               \
+  PADDLE_ENFORCE(subgraph.count(pattern.RetrieveNode(#id)), \
+                 "pattern has no Node called %s", #id);     \
+  auto* id = subgraph.at(pattern.RetrieveNode(#id));        \
+  PADDLE_ENFORCE_NOT_NULL(id, "subgraph has no node %s", #id);
+
+std::unique_ptr<ir::Graph> DepthwiseConvMKLDNNPass::ApplyImpl(
+    std::unique_ptr<ir::Graph> graph) const {
+  PADDLE_ENFORCE(graph.get());
+  FusePassBase::Init("depthwise_conv_mkldnn_pass", graph.get());
+  GraphPatternDetector gpd;
+
+  auto* pattern = gpd.mutable_pattern();
+  pattern->NewNode("depthwise_conv")
+      ->assert_is_op("depthwise_conv2d")
+      ->assert_op_attr("use_mkldnn", true);
+
+  int found_depthwise_conv_mkldnn_count = 0;
+  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
+                     Graph* g) {
+    VLOG(3) << "handle DepthwiseConvMKLDNN fuse";
+    GET_NODE(depthwise_conv, (*pattern));
+    depthwise_conv->Op()->SetType("conv2d");
+    found_depthwise_conv_mkldnn_count++;
+  };
+
+  gpd(graph.get(), handler);
+  AddStatis(found_depthwise_conv_mkldnn_count);
+  return graph;
+}
+
+}  // namespace ir
+}  // namespace framework
+}  // namespace paddle
+
+REGISTER_PASS(depthwise_conv_mkldnn_pass,
+              paddle::framework::ir::DepthwiseConvMKLDNNPass);
diff --git a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ca6a7325186401c26eb7f9375cf83b7b97cc1c9
--- /dev/null
+++ b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/fluid/framework/ir/fuse_pass_base.h"
+
+namespace paddle {
+namespace framework {
+namespace ir {
+
+class DepthwiseConvMKLDNNPass : public FusePassBase {
+ public:
+  virtual ~DepthwiseConvMKLDNNPass() {}
+
+ protected:
+  std::unique_ptr<ir::Graph> ApplyImpl(
+      std::unique_ptr<ir::Graph> graph) const override;
+};
+
+}  // namespace ir
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass_tester.cc b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass_tester.cc
new file mode 100644
index 0000000000000000000000000000000000000000..09d0b15f46a7e50afb6aea46383013ce6a6c6118
--- /dev/null
+++ b/paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass_tester.cc
@@ -0,0 +1,123 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/framework/ir/depthwise_conv_mkldnn_pass.h"
+
+#include <gtest/gtest.h>
+
+namespace paddle {
+namespace framework {
+namespace ir {
+
+void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
+           const std::vector<std::string>& inputs,
+           const std::vector<std::string>& outputs, bool use_mkldnn = false) {
+  auto* op = prog->MutableBlock(0)->AppendOp();
+  op->SetType(type);
+  op->SetAttr("use_mkldnn", use_mkldnn);
+  op->SetAttr("name", name);
+  op->SetInput("Input", {inputs[0]});
+  op->SetInput("Filter", {inputs[1]});
+  op->SetInput("Bias", {inputs[2]});
+  op->SetOutput("Out", outputs);
+}
+
+// (a, weights, bias)->depthwise conv mkldnn->b
+// (b, weights2, bias2)->depthwise conv no mkldnn->c
+// (c, weights3, bias3)->conv mkldnn->d
+// (d, weights4, bias4)->conv no mkldnn->e
+ProgramDesc BuildProgramDesc() {
+  ProgramDesc prog;
+  for (auto& v : std::vector<std::string>(
+           {"a", "b", "c", "d", "e", "weights", "bias", "weights2", "bias2",
+            "weights3", "bias3", "weights4", "bias4"})) {
+    auto* var = prog.MutableBlock(0)->Var(v);
+    var->SetType(proto::VarType::SELECTED_ROWS);
+    if (v == "weights" || v == "bias" || v == "weights2" || v == "bias2" ||
+        v == "weights3" || v == "bias3" || v == "weights4" || v == "bias4") {
+      var->SetPersistable(true);
+    }
+  }
+
+  // depthwise conv with MKL-DNN
+  SetOp(&prog, "depthwise_conv2d", "conv1",
+        std::vector<std::string>({"a", "weights", "bias"}),
+        std::vector<std::string>({"b"}), true);
+  // depthwise conv without MKL-DNN
+  SetOp(&prog, "depthwise_conv2d", "conv2",
+        std::vector<std::string>({"b", "weights2", "bias2"}),
+        std::vector<std::string>({"c"}), false);
+  // conv with MKL-DNN
+  SetOp(&prog, "conv2d", "conv3",
+        std::vector<std::string>({"c", "weights3", "bias3"}),
+        std::vector<std::string>({"d"}), true);
+  // conv without MKL-DNN
+  SetOp(&prog, "conv2d", "conv4",
+        std::vector<std::string>({"d", "weights4", "bias4"}),
+        std::vector<std::string>({"e"}), false);
+
+  return prog;
+}
+
+TEST(DepthwiseConvMKLDNNPass, basic) {
+  auto prog = BuildProgramDesc();
+
+  std::unique_ptr<ir::Graph> graph(new ir::Graph(prog));
+
+  auto pass = PassRegistry::Instance().Get("depthwise_conv_mkldnn_pass");
+
+  struct counters {
+    int mkldnn_depthwise_conv_nodes;
+    int other_depthwise_conv_nodes;
+    int mkldnn_conv_nodes;
+    int other_conv_nodes;
+  };
+
+  counters before{1, 1, 1, 1};
+
+  graph = pass->Apply(std::move(graph));
+
+  // initialize counters before loop
+  counters after{0, 0, 0, 0};
+
+  for (auto* node : graph->Nodes()) {
+    if (node->IsOp()) {
+      auto* op = node->Op();
+      if (op->Type() == "conv2d") {
+        if (boost::get<bool>(op->GetAttr("use_mkldnn")))
+          after.mkldnn_conv_nodes++;
+        else
+          after.other_conv_nodes++;
+      } else if (op->Type() == "depthwise_conv2d") {
+        if (boost::get<bool>(op->GetAttr("use_mkldnn")))
+          after.mkldnn_depthwise_conv_nodes++;
+        else
+          after.other_depthwise_conv_nodes++;
+      }
+    }
+  }
+
+  EXPECT_EQ(after.other_depthwise_conv_nodes,
+            before.other_depthwise_conv_nodes);
+  EXPECT_EQ(after.other_conv_nodes, before.other_conv_nodes);
+  EXPECT_EQ(after.mkldnn_depthwise_conv_nodes,
+            before.mkldnn_depthwise_conv_nodes - 1);
+  EXPECT_EQ(after.mkldnn_conv_nodes, before.mkldnn_conv_nodes + 1);
+}
+
+}  // namespace ir
+}  // namespace framework
+}  // namespace paddle
+
+USE_PASS(depthwise_conv_mkldnn_pass);
diff --git a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
index 06286a109d01af638e74e06ccc83e2a5500663ea..2db7d95cae1c8c59691fd642e2462e92ed58814f 100644
--- a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
@@ -15,6 +15,7 @@
 #include "paddle/fluid/framework/ir/fc_fuse_pass.h"
 
 #include <gtest/gtest.h>
+#include "paddle/fluid/framework/op_proto_maker.h"
 
 namespace paddle {
 namespace framework {
@@ -32,6 +33,8 @@ void SetOp(ProgramDesc* prog, const std::string& type,
     op->SetInput("X", inputs);
   }
   op->SetOutput("Out", outputs);
+  op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(),
+              static_cast<int>(OpRole::kForward));
 }
 
 // a->OP0->b
diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc
index 265a128e95e6205159de67d87d5cb8ca1ad7d475..132159b8b272f311060a39b58919c26822bf50ee 100644
--- a/paddle/fluid/framework/ir/graph.cc
+++ b/paddle/fluid/framework/ir/graph.cc
@@ -23,8 +23,67 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 namespace ir {
+namespace {
+
+void CheckProgram(const ProgramDesc &program) {
+#define _INT(role) static_cast<int>(role)
+
+  std::map<int, bool> visit;
+  for (OpDesc *op : program.Block(0).AllOps()) {
+    // For backward compatibility, some program doesn't have role added.
+    if (!op->HasAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) continue;
+    int role_id =
+        boost::get<int>(op->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName()));
+    visit[role_id] = true;
+    switch (role_id) {
+      case _INT(OpRole::kForward):
+        if (visit.find(_INT(OpRole::kBackward)) != visit.end()) {
+          LOG(ERROR)
+              << "Cannot add backward operator before forward operator %s."
+              << op->Type();
+        }
+        break;
+      case _INT(OpRole::kBackward):
+      case _INT(OpRole::kBackward) | _INT(OpRole::kLoss):
+        PADDLE_ENFORCE(
+            visit.find(_INT(OpRole::kOptimize)) == visit.end(),
+            "Cannot add backward operator %s after optimize operator.",
+            op->Type());
+        break;
+      case _INT(OpRole::kForward) | _INT(OpRole::kLoss):
+        PADDLE_ENFORCE(visit.find(_INT(OpRole::kBackward) |
+                                  _INT(OpRole::kLoss)) == visit.end(),
+                       "Cannot add backward|loss operator before "
+                       "forward|loss operator %s.",
+                       op->Type());
+        PADDLE_ENFORCE(
+            visit.find(_INT(OpRole::kOptimize)) == visit.end(),
+            "Cannot add forward|loss operator %s after optimize operator.",
+            op->Type());
+        break;
+      case _INT(OpRole::kOptimize):
+      case _INT(OpRole::kOptimize) | _INT(OpRole::kLRSched):
+        PADDLE_ENFORCE(visit.find(_INT(OpRole::kBackward)) != visit.end(),
+                       "Optimize operators %s must follow backward operator.",
+                       op->Type());
+        break;
+      case _INT(OpRole::kLRSched):
+      case _INT(OpRole::kDist):
+      case _INT(OpRole::kRPC):
+      case _INT(OpRole::kNotSpecified):
+        break;
+      default:
+        LOG(FATAL) << "Unknown operator role. Don't add new role because "
+                      "you don't know what you are doing.";
+    }
+  }
+
+#undef _INT
+}
+}  // namespace
 
 Graph::Graph(const ProgramDesc &program) : program_(program) {
+  CheckProgram(program_);
   // Make the nodes id start from 0.
   Node::ResetId();
   auto var_nodes = InitFromProgram(program_);
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc
index 29b604afbfcfc2bac67e447db8cd4c671c036dbe..b20d70132256bd5df7411c46ff4eb246b1f14ba8 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.cc
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -259,6 +259,15 @@ GraphPatternDetector::DetectPatterns() {
   return result;
 }
 
+bool GraphItemCMP(const std::pair<PDNode *, Node *> &a,
+                  const std::pair<PDNode *, Node *> &b) {
+  if (a.first != b.first) {
+    return a.first < b.first;
+  } else {
+    return a.second < b.second;
+  }
+}
+
 // TODO(Superjomn) enhance the function as it marks unique unique as duplicates
 // see https://github.com/PaddlePaddle/Paddle/issues/13550
 void GraphPatternDetector::UniquePatterns(
@@ -267,12 +276,16 @@ void GraphPatternDetector::UniquePatterns(
   std::vector<GraphPatternDetector::subgraph_t> result;
 
   std::unordered_set<size_t> set;
+  std::hash<std::string> hasher;
   for (auto &g : *subgraphs) {
-    size_t key = 0;
-    for (auto &item : g) {
-      key ^= std::hash<void *>{}(item.first);
-      key ^= std::hash<void *>{}(item.second);
+    // Sort the items in the sub-graph, and transform to a string key.
+    std::vector<std::pair<PDNode *, Node *>> sorted_keys(g.begin(), g.end());
+    std::sort(sorted_keys.begin(), sorted_keys.end(), GraphItemCMP);
+    std::stringstream ss;
+    for (auto &item : sorted_keys) {
+      ss << item.first << ":" << item.second;
     }
+    auto key = hasher(ss.str());
     if (!set.count(key)) {
       result.emplace_back(g);
       set.insert(key);
diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc
index 1e7da9a69c7cbf8c13306656599a759515802b76..669d08c70c9b7453264806b346a6c9eb211cfd4a 100644
--- a/paddle/fluid/framework/lod_tensor.cc
+++ b/paddle/fluid/framework/lod_tensor.cc
@@ -418,7 +418,7 @@ void LoDTensor::MergeLoDTensor(
     PADDLE_ENFORCE_EQ(new_lod.size(), lod.size());
     for (size_t j = 0; j < lod.size(); ++j) {
       auto &sub_lod = new_lod[j];
-      auto &offset = sub_lod.back();
+      size_t offset = sub_lod.back();
       for (size_t k = 1; k < lod[j].size(); ++k) {
         sub_lod.push_back(lod[j][k] + offset);
       }
diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index a45b9ec7a20ac3629d182f009b735d4d82fb5dc2..dfb107688ad7281765049cd9849d56b8a61bdd37 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -38,9 +38,20 @@ class ParallelExecutorPrivate {
   explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
       : places_(places) {}
 
+  ~ParallelExecutorPrivate() {
+    if (own_local_scope_) {
+      for (size_t i = 1; i < local_scopes_.size(); ++i) {
+        // Skip the first scope, since it is the global scope.
+        Scope *local_scope = local_scopes_[i];
+        if (global_scope_->HasKid(local_scope)) {
+          global_scope_->DeleteScope(local_scope);
+        }
+      }
+    }
+  }
   std::vector<platform::Place> places_;
   std::vector<Scope *> local_scopes_;
-  Scope *global_scope_;
+  Scope *global_scope_;  // not owned
   std::unique_ptr<details::SSAGraphExecutor> executor_;
 
 #ifdef PADDLE_WITH_CUDA
@@ -306,16 +317,6 @@ ParallelExecutor::~ParallelExecutor() {
   for (auto &p : member_->places_) {
     platform::DeviceContextPool::Instance().Get(p)->Wait();
   }
-
-  if (member_->own_local_scope_) {
-    for (size_t i = 1; i < member_->local_scopes_.size(); ++i) {
-      Scope *local_scope = member_->local_scopes_[i];
-      if (member_->global_scope_->HasKid(local_scope)) {
-        member_->global_scope_->DeleteScope(local_scope);
-      }
-    }
-  }
-
   // member_ must be destructed before gcs_ since the destructor of
   // ReferenceCountOpHandle use raw pointers of gcs_ inside.
   member_.reset();
diff --git a/paddle/fluid/framework/tensor_test.cc b/paddle/fluid/framework/tensor_test.cc
index cb2061c06a429d8e8116001a4aa4e8c46ea13428..a0a9a573603ceb6b577529101cb331adbc81337a 100644
--- a/paddle/fluid/framework/tensor_test.cc
+++ b/paddle/fluid/framework/tensor_test.cc
@@ -75,6 +75,19 @@ TEST(Tensor, MutableData) {
                                     platform::CPUPlace());
     EXPECT_EQ(p1, p2);
   }
+  // Not sure if it's desired, but currently, Tensor type can be changed.
+  {
+    framework::Tensor src_tensor;
+    int8_t* p1 = src_tensor.mutable_data<int8_t>(framework::make_ddim({1}),
+                                                 platform::CPUPlace());
+    EXPECT_NE(p1, nullptr);
+    *p1 = 1;
+
+    uint8_t* p2 = src_tensor.mutable_data<uint8_t>(framework::make_ddim({1}),
+                                                   platform::CPUPlace());
+    EXPECT_NE(p2, nullptr);
+    EXPECT_EQ(static_cast<int>(p2[0]), 1);
+  }
 
 #ifdef PADDLE_WITH_CUDA
   {
diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc
index 69bcbc0e5891f95af4de8dfd49a25648ca920ab1..ca1e01c89f07c4ffc3979a6a6c3728328e0a1819 100644
--- a/paddle/fluid/framework/tensor_util.cc
+++ b/paddle/fluid/framework/tensor_util.cc
@@ -153,6 +153,12 @@ void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
     auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
     auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
     memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr);
+  } else if (platform::is_cuda_pinned_place(src_place) &&
+             platform::is_gpu_place(dst_place)) {
+    auto src_pinned_place = boost::get<platform::CUDAPinnedPlace>(src_place);
+    auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
+    memory::Copy(dst_gpu_place, dst_ptr, src_pinned_place, src_ptr, size,
+                 nullptr);
   }
 #endif
 }
diff --git a/paddle/fluid/framework/threadpool.cc b/paddle/fluid/framework/threadpool.cc
index a588cb417aebe94bd4aeda02b1bc8ba07a04b960..fcec955360f1c681a62929e904d5736854a8ffad 100644
--- a/paddle/fluid/framework/threadpool.cc
+++ b/paddle/fluid/framework/threadpool.cc
@@ -57,10 +57,10 @@ ThreadPool::ThreadPool(int num_threads) : running_(true) {
 ThreadPool::~ThreadPool() {
   {
     // notify all threads to stop running
-    std::lock_guard<std::mutex> l(mutex_);
+    std::unique_lock<std::mutex> l(mutex_);
     running_ = false;
-    scheduled_.notify_all();
   }
+  scheduled_.notify_all();
 
   for (auto& t : threads_) {
     t->join();
@@ -70,19 +70,25 @@ ThreadPool::~ThreadPool() {
 
 void ThreadPool::TaskLoop() {
   while (true) {
-    std::unique_lock<std::mutex> lock(mutex_);
+    Task task;
 
-    scheduled_.wait(
-        lock, [this] { return !this->tasks_.empty() || !this->running_; });
+    {
+      std::unique_lock<std::mutex> lock(mutex_);
+      scheduled_.wait(
+          lock, [this] { return !this->tasks_.empty() || !this->running_; });
 
-    if (!running_ || tasks_.empty()) {
-      return;
-    }
+      if (!running_ && tasks_.empty()) {
+        return;
+      }
+
+      if (tasks_.empty()) {
+        PADDLE_THROW("This thread has no task to Run");
+      }
 
-    // pop a task from the task queue
-    auto task = std::move(tasks_.front());
-    tasks_.pop();
-    lock.unlock();
+      // pop a task from the task queue
+      task = std::move(tasks_.front());
+      tasks_.pop();
+    }
 
     // run the task
     task();
diff --git a/paddle/fluid/framework/threadpool.h b/paddle/fluid/framework/threadpool.h
index 0687e628aaa4fb7b2e67938fa09a319c8bb35aff..7a51d18fbbf65f68725aa86a6a0ce4d15dff5673 100644
--- a/paddle/fluid/framework/threadpool.h
+++ b/paddle/fluid/framework/threadpool.h
@@ -58,7 +58,7 @@ class ThreadPool {
   ~ThreadPool();
 
   // Run pushes a function to the task queue and returns a std::future<>
-  // object. To wait for the completion of the task, call
+  // object.  To wait for the completion of the task, call
   // std::future::wait().
template std::future Run(Callback fn) { @@ -69,7 +69,6 @@ class ThreadPool { template std::future> RunAndGetException( Callback fn) { - std::unique_lock lock(mutex_); Task task([fn]() -> std::unique_ptr { try { fn(); @@ -84,7 +83,13 @@ class ThreadPool { return nullptr; }); std::future> f = task.get_future(); - tasks_.push(std::move(task)); + { + std::unique_lock lock(mutex_); + if (!running_) { + PADDLE_THROW("enqueue on stopped ThreadPool"); + } + tasks_.push(std::move(task)); + } scheduled_.notify_one(); return f; } diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index c8a950fce0bfc2dd0856c48f7026fa7334eedeaf..da1711fc18b308f354887f831c044ed4489f87dd 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -1,3 +1,6 @@ +if(WITH_TESTING) + include(tests/test.cmake) # some generic cmake funtion for inference +endif() # analysis and tensorrt must be added before creating static library, # otherwise, there would be undefined reference to them in static library. add_subdirectory(analysis) diff --git a/paddle/fluid/inference/analysis/analyzer.h b/paddle/fluid/inference/analysis/analyzer.h index 7114f5222c5904bb4422b9e67ad035b85bbb770c..3af1d572dfd81197797dd7e57d87ba12c2f3548e 100644 --- a/paddle/fluid/inference/analysis/analyzer.h +++ b/paddle/fluid/inference/analysis/analyzer.h @@ -79,6 +79,7 @@ class Analyzer : public OrderedRegistry { "conv_bn_fuse_pass", // "conv_eltwiseadd_bn_fuse_pass", // #ifdef PADDLE_WITH_MKLDNN + "depthwise_conv_mkldnn_pass", // "conv_bias_mkldnn_fuse_pass", // "conv_relu_mkldnn_fuse_pass", // "conv_elementwise_add_mkldnn_fuse_pass", // diff --git a/paddle/fluid/inference/analysis/data_flow_graph_tester.cc b/paddle/fluid/inference/analysis/data_flow_graph_tester.cc index 1682011c3d8cc9927a4b026b370671798cace625..50ce20621fb289023ecccf7bb39d98169765d5ee 100644 --- a/paddle/fluid/inference/analysis/data_flow_graph_tester.cc +++ b/paddle/fluid/inference/analysis/data_flow_graph_tester.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/inference/analysis/data_flow_graph.h" +#include "paddle/fluid/framework/op_proto_maker.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/inference/analysis/ut_helper.h" @@ -130,6 +131,8 @@ void SetOp(framework::ProgramDesc* prog, const std::string& type, op->SetType(type); op->SetInput("Xs", inputs); op->SetOutput("Xs", outputs); + op->SetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName(), + static_cast(framework::OpRole::kForward)); } TEST(DataFlowGraph, Build_IR_Graph) { diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index a55426f74f988176aeb180e48d1af8632ed3b5c7..49a9ebe3ddec1e4fd59ae1155a706859e249d25c 100644 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -17,39 +17,12 @@ if(APPLE) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move") endif(APPLE) - -set(inference_deps paddle_inference_api paddle_fluid_api analysis pass ir_pass_manager naive_executor ${GLOB_PASS_LIB} - ) +set(inference_deps paddle_inference_api paddle_fluid_api analysis pass ir_pass_manager naive_executor ${GLOB_PASS_LIB}) if(WITH_GPU AND TENSORRT_FOUND) set(inference_deps ${inference_deps} paddle_inference_tensorrt_subgraph_engine analysis_predictor) endif() -function(inference_api_test TARGET_NAME) - if (WITH_TESTING) - set(options "") - set(oneValueArgs SRC) - set(multiValueArgs ARGS) - cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - if (WITH_GPU) - cc_test(${TARGET_NAME} - SRCS ${inference_test_SRC} - DEPS "${inference_deps}" - ARGS --dirname=${PYTHON_TESTS_DIR}/book/ --fraction_of_gpu_memory_to_use=0.15) - else() - cc_test(${TARGET_NAME} - SRCS ${inference_test_SRC} - DEPS "${inference_deps}" - ARGS --dirname=${PYTHON_TESTS_DIR}/book/) - endif() - if(inference_test_ARGS) - set_tests_properties(${TARGET_NAME} - PROPERTIES DEPENDS "${inference_test_ARGS}") - endif() - endif(WITH_TESTING) -endfunction(inference_api_test) - cc_library(reset_tensor_array SRCS details/reset_tensor_array.cc DEPS lod_tensor scope) cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS reset_tensor_array lod_tensor scope) cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api analysis naive_executor zero_copy_tensor) @@ -59,8 +32,11 @@ cc_test(test_paddle_inference_api SRCS api_tester.cc DEPS paddle_inference_api) -inference_api_test(test_api_impl SRC api_impl_tester.cc - ARGS test_word2vec test_image_classification) +if(WITH_TESTING) + inference_base_test(test_api_impl SRCS api_impl_tester.cc DEPS ${inference_deps} + ARGS --word2vec_dirname=${WORD2VEC_MODEL_DIR} --book_dirname=${PYTHON_TESTS_DIR}/book) + set_tests_properties(test_api_impl PROPERTIES DEPENDS test_image_classification) +endif() cc_test(test_analysis_predictor SRCS analysis_predictor_tester.cc DEPS analysis_predictor ${inference_deps} paddle_inference_api ARGS --dirname=${PYTHON_TESTS_DIR}/book) @@ -68,8 +44,10 @@ if(WITH_GPU AND TENSORRT_FOUND) cc_library(paddle_inference_tensorrt_subgraph_engine SRCS api_tensorrt_subgraph_engine.cc DEPS paddle_inference_api analysis tensorrt_engine paddle_inference_api paddle_fluid_api tensorrt_converter zero_copy_tensor_dummy) - -inference_api_test(test_api_tensorrt_subgraph_engine SRC api_tensorrt_subgraph_engine_tester.cc ARGS test_word2vec) + if(WITH_TESTING) + inference_base_test(test_api_tensorrt_subgraph_engine SRCS api_tensorrt_subgraph_engine_tester.cc DEPS 
${inference_deps} + ARGS --dirname=${WORD2VEC_MODEL_DIR}) + endif() endif() if (WITH_ANAKIN AND WITH_MKL) # only needed in CI diff --git a/paddle/fluid/inference/api/api_impl_tester.cc b/paddle/fluid/inference/api/api_impl_tester.cc index 1d4dfb8649563ab23ffeec1f79bb305fd2ebae26..5152b8670ddb206f0927c03149684af4a096df42 100644 --- a/paddle/fluid/inference/api/api_impl_tester.cc +++ b/paddle/fluid/inference/api/api_impl_tester.cc @@ -22,12 +22,14 @@ limitations under the License. */ #include "paddle/fluid/inference/tests/test_helper.h" #ifdef __clang__ -#define ACC_DIFF 4e-2 +#define ACC_DIFF 4e-3 #else -#define ACC_DIFF 1e-2 +#define ACC_DIFF 1e-3 #endif -DEFINE_string(dirname, "", "Directory of the inference model."); +DEFINE_string(word2vec_dirname, "", + "Directory of the word2vec inference model."); +DEFINE_string(book_dirname, "", "Directory of the book inference model."); namespace paddle { @@ -49,7 +51,7 @@ PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) { NativeConfig GetConfig() { NativeConfig config; - config.model_dir = FLAGS_dirname + "/word2vec.inference.model"; + config.model_dir = FLAGS_word2vec_dirname; LOG(INFO) << "dirname " << config.model_dir; config.fraction_of_gpu_memory = 0.15; #ifdef PADDLE_WITH_CUDA @@ -116,7 +118,7 @@ void MainImageClassification(bool use_gpu) { NativeConfig config = GetConfig(); config.use_gpu = use_gpu; config.model_dir = - FLAGS_dirname + "/image_classification_resnet.inference.model"; + FLAGS_book_dirname + "/image_classification_resnet.inference.model"; const bool is_combined = false; std::vector> feed_target_shapes = @@ -220,7 +222,7 @@ void MainThreadsImageClassification(bool use_gpu) { NativeConfig config = GetConfig(); config.use_gpu = use_gpu; config.model_dir = - FLAGS_dirname + "/image_classification_resnet.inference.model"; + FLAGS_book_dirname + "/image_classification_resnet.inference.model"; auto main_predictor = CreatePaddlePredictor(config); std::vector jobs(num_jobs); diff --git a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc index 702158ea3bcab854eece3ccd40724d92efcbae67..89c9a65cb06ba565f0e0cbdb9b6031c6adbcb64e 100644 --- a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc +++ b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc @@ -29,13 +29,13 @@ void CompareTensorRTWithFluid(bool enable_tensorrt) { //# 1. Create PaddlePredictor with a config. 
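
The testers above now receive the model directory directly through a flag (--word2vec_dirname, or --dirname set to ${WORD2VEC_MODEL_DIR}) instead of appending a hard-coded word2vec.inference.model suffix, so the flag must point at the model directory itself rather than its parent. A hedged sketch of how such a config becomes a predictor; MakeNativePredictor is an illustrative helper, not part of the API:

    #include <memory>
    #include <string>
    #include "paddle/fluid/inference/api/paddle_inference_api.h"

    // Minimal sketch: build a native predictor from a flag-provided model
    // directory (e.g. the unpacked word2vec.inference.model).
    std::unique_ptr<paddle::PaddlePredictor> MakeNativePredictor(
        const std::string &model_dir) {
      paddle::NativeConfig config;
      config.model_dir = model_dir;  // no hard-coded subpath appended
      config.use_gpu = true;
      config.fraction_of_gpu_memory = 0.3;
      config.device = 0;
      return paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
    }
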
NativeConfig config0; - config0.model_dir = FLAGS_dirname + "word2vec.inference.model"; + config0.model_dir = FLAGS_dirname; config0.use_gpu = true; config0.fraction_of_gpu_memory = 0.3; config0.device = 0; MixedRTConfig config1; - config1.model_dir = FLAGS_dirname + "word2vec.inference.model"; + config1.model_dir = FLAGS_dirname; config1.use_gpu = true; config1.fraction_of_gpu_memory = 0.3; config1.device = 0; diff --git a/paddle/fluid/inference/api/demo_ci/run.sh b/paddle/fluid/inference/api/demo_ci/run.sh index 1ac655bdbbf7c45bfdde2c5fa8026fab2c891903..ff718077c1ba6b10fe87aac10d84f96a23ad6bba 100755 --- a/paddle/fluid/inference/api/demo_ci/run.sh +++ b/paddle/fluid/inference/api/demo_ci/run.sh @@ -62,7 +62,7 @@ for WITH_STATIC_LIB in ON OFF; do -DWITH_GPU=$TEST_GPU_CPU \ -DWITH_STATIC_LIB=$WITH_STATIC_LIB make -j - word2vec_model=${PADDLE_ROOT}'/build/python/paddle/fluid/tests/book/word2vec.inference.model' + word2vec_model=$DATA_DIR'/word2vec/word2vec.inference.model' if [ -d $word2vec_model ]; then for use_gpu in $use_gpu_list; do ./simple_on_word2vec \ diff --git a/paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc b/paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc index 487fc7b14e2c04af1e17efff91de0bfeed15c8a7..5446fd4d4256c10442a53ea09a447cf308cbd681 100644 --- a/paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc +++ b/paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc @@ -70,12 +70,8 @@ void Main(bool use_gpu) { // The outputs' buffers are in CPU memory. for (size_t i = 0; i < std::min(static_cast(5), num_elements); i++) { - // Here will result random fail, for that the model is trained by CI, the - // train phase is not stable, so the result will be random. - // TODO(Superjomn) will restore after the model is upload. 
- // CHECK_NEAR(static_cast(outputs.front().data.data())[i], - // result[i], - // 0.001); + CHECK_NEAR(static_cast(outputs.front().data.data())[i], result[i], + 0.001); } } } diff --git a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc index 0a37d3968c39d2c244bbd82161afddf6330e421d..7bcf2dd1eeb17e802c5647df31945284ae08fa95 100644 --- a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc @@ -18,6 +18,21 @@ namespace paddle { namespace inference { namespace tensorrt { +bool to_skip_merging_optimize(TensorRTEngine* engine_, + const std::vector& filters, + const std::vector& strides, + const std::vector& paddings, + std::string input_name) { + if (engine_->itensor_quote_num[input_name] > 0) { + return true; + } + if (filters[0] == 1 && filters[1] == 1 && strides[0] == 1 && + strides[1] == 1 && paddings[0] == 0 && paddings[1] == 0) + engine_->itensor_quote_num[input_name] += 1; + + return false; +} + class Conv2dOpConverter : public OpConverter { public: void operator()(const framework::proto::OpDesc& op, @@ -31,6 +46,7 @@ class Conv2dOpConverter : public OpConverter { PADDLE_ENFORCE_EQ(op_desc.Output("Output").size(), 1); auto* X = engine_->GetITensor(op_desc.Input("Input").front()); + // Declare weights auto* Y_v = scope.FindVar(op_desc.Input("Filter").front()); PADDLE_ENFORCE_NOT_NULL(Y_v); @@ -83,7 +99,10 @@ class Conv2dOpConverter : public OpConverter { std::move(weight_tensor); layer->getOutput(0)->setName(output_name.c_str()); engine_->SetITensor(output_name, layer->getOutput(0)); - if (test_mode) { + + if (test_mode || + to_skip_merging_optimize(engine_, {filter_h, filter_w}, strides, + paddings, op_desc.Input("Input").front())) { engine_->DeclareOutput(output_name); } } diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc index 14e9e14d33d637ee68e37593cc48721e5169499f..9e0f95844761db7571c5313726d34685a9aa66b2 100644 --- a/paddle/fluid/inference/tensorrt/engine.cc +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -133,6 +133,10 @@ void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer *layer, int offset, buffer_sizes_[name] = 0; } +bool TensorRTEngine::HasDeclared(const std::string &name) { + return buffer_sizes_.count(name) > 0; +} + void TensorRTEngine::DeclareOutput(const std::string &name) { PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate output name %s", name); diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index bd3ba4cea6551a7f6651e311e2649de191a6faa1..d9d3827321127631c0af6e5cfd2dfdd640cee146 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -91,6 +91,8 @@ class TensorRTEngine : public EngineBase { const std::string& name); // Set the itensor_map_[name] as the network's output, and set its name. void DeclareOutput(const std::string& name); + // Check if the ITensor has been declared + bool HasDeclared(const std::string& name); // GPU memory address for an ITensor with specific name. One can operate on // these memory directly for acceleration, for example, output the converted @@ -132,6 +134,16 @@ class TensorRTEngine : public EngineBase { std::unordered_map> weight_map; + // TODO: (NHZLX) + // In the normal case, the paddle-trt exists bug when runing the googlenet. 
+ // When there are more than two convolutions of 1 * 1 with the same input, the + // paddle-tensorrt will do the merging optimization, which fuse those conv + // into + // one conv, and then trigger bug. So, We should use strategy to avoid this + // optimization for the time being. This bug will be fixed in the future. + std::unordered_map + itensor_quote_num; + private: // the max batch size int max_batch_; diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index c3dd1f433691e1c96e9f38ef7b595befad26408f..2ca84c80058b35840aff5d072cdc99ecf5165f8e 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -1,17 +1,9 @@ -set(INFERENCE_URL "http://paddle-inference-dist.cdn.bcebos.com") -set(INFERENCE_DEMO_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo" CACHE STRING - "A path setting inference demo download directories.") set(INFERENCE_EXTRA_DEPS paddle_inference_api paddle_fluid_api ir_pass_manager analysis_predictor) -function (inference_download install_dir url filename) - message(STATUS "Download inference test stuff from ${url}/${filename}") - execute_process(COMMAND bash -c "mkdir -p ${install_dir}") - execute_process(COMMAND bash -c "cd ${install_dir} && wget -q ${url}/${filename}") - message(STATUS "finish downloading ${filename}") -endfunction() -function (inference_download_and_uncompress install_dir url filename) - inference_download(${install_dir} ${url} ${filename}) - execute_process(COMMAND bash -c "cd ${install_dir} && tar xzf ${filename}") +function(download_model install_dir model_name) + if (NOT EXISTS ${install_dir}) + inference_download_and_uncompress(${install_dir} ${INFERENCE_URL} ${model_name}) + endif() endfunction() function(download_model_and_data install_dir model_name data_name) @@ -27,6 +19,13 @@ function(inference_analysis_api_test target install_dir filename) ARGS --infer_model=${install_dir}/model --infer_data=${install_dir}/data.txt) endfunction() +function(inference_analysis_api_test_with_fake_data target install_dir filename model_name) + download_model(${install_dir} ${model_name}) + inference_analysis_test(${target} SRCS ${filename} + EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} + ARGS --infer_model=${install_dir}/model) +endfunction() + # RNN1 if(NOT APPLE) set(RNN1_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/rnn1") @@ -43,6 +42,15 @@ set(RNN2_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/rnn2") download_model_and_data(${RNN2_INSTALL_DIR} "rnn2_model.tar.gz" "rnn2_data.txt.tar.gz") inference_analysis_api_test(test_analyzer_rnn2 ${RNN2_INSTALL_DIR} analyzer_rnn2_tester.cc) +# DAM +set(DAM_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/dam") +download_model_and_data(${DAM_INSTALL_DIR} "DAM_model.tar.gz" "DAM_data.txt.tar.gz") +inference_analysis_test(test_analyzer_dam SRCS analyzer_dam_tester.cc + EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} ARGS + --infer_model=${DAM_INSTALL_DIR}/model + --infer_data=${DAM_INSTALL_DIR}/data.txt + --use_analysis=0) + # chinese_ner set(CHINESE_NER_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/chinese_ner") download_model_and_data(${CHINESE_NER_INSTALL_DIR} "chinese_ner_model.tar.gz" "chinese_ner-data.txt.tar.gz") @@ -66,17 +74,13 @@ inference_analysis_api_test(test_analyzer_seq_conv1 ${SEQ_CONV1_INSTALL_DIR} ana # ocr set(OCR_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/ocr") if (NOT EXISTS ${OCR_INSTALL_DIR}) - inference_download_and_uncompress(${OCR_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Focr.tar.gz") + 
inference_download_and_uncompress(${OCR_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Focr.tar.gz") endif() inference_analysis_api_test(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_tester.cc) # resnet50 -set(RESNET50_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/resnet50") -if (NOT EXISTS ${RESNET50_INSTALL_DIR}) - inference_download_and_uncompress(${RESNET50_INSTALL_DIR} ${INFERENCE_URL} "resnet50_model.tar.gz") -endif() -inference_analysis_test(test_analyzer_resnet50 SRCS analyzer_resnet50_tester.cc - EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} ARGS --infer_model=${RESNET50_INSTALL_DIR}/model) +inference_analysis_api_test_with_fake_data(test_analyzer_resnet50 + "${INFERENCE_DEMO_INSTALL_DIR}/resnet50" analyzer_resnet50_tester.cc "resnet50_model.tar.gz") # anakin if (WITH_ANAKIN AND WITH_MKL) # only needed in CI diff --git a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc new file mode 100644 index 0000000000000000000000000000000000000000..ceac5dc7e14365c77cf1cbbbc16e4bf3ebfced73 --- /dev/null +++ b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc @@ -0,0 +1,224 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/tests/api/tester_helper.h" + +namespace paddle { +namespace inference { +using contrib::AnalysisConfig; +#define MAX_TURN_NUM 9 +#define MAX_TURN_LEN 50 +static std::vector result_data; + +struct DataRecord { + std::vector> + turns[MAX_TURN_NUM]; // turns data : MAX_TURN_NUM + std::vector> + turns_mask[MAX_TURN_NUM]; // turns mask data : MAX_TURN_NUM + std::vector> response; // response data : 1 + std::vector> response_mask; // response mask data : 1 + size_t batch_iter{0}; + size_t batch_size{1}; + size_t num_samples; // total number of samples + DataRecord() = default; + explicit DataRecord(const std::string &path, int batch_size = 1) + : batch_size(batch_size) { + Load(path); + } + DataRecord NextBatch() { + DataRecord data; + size_t batch_end = batch_iter + batch_size; + // NOTE skip the final batch, if no enough data is provided. 
+ if (batch_end <= response.size()) { + for (int i = 0; i < MAX_TURN_NUM; ++i) { + data.turns[i].assign(turns[i].begin() + batch_iter, + turns[i].begin() + batch_end); + } + for (int i = 0; i < MAX_TURN_NUM; ++i) { + data.turns_mask[i].assign(turns_mask[i].begin() + batch_iter, + turns_mask[i].begin() + batch_end); + } + data.response.assign(response.begin() + batch_iter, + response.begin() + batch_end); + data.response_mask.assign(response_mask.begin() + batch_iter, + response_mask.begin() + batch_end); + CHECK(!data.response.empty()); + CHECK(!data.response_mask.empty()); + CHECK_EQ(data.response.size(), data.response_mask.size()); + } + batch_iter += batch_size; + return data; + } + void Load(const std::string &path) { + std::ifstream file(path); + std::string line; + size_t num_lines = 0; + result_data.clear(); + while (std::getline(file, line)) { + num_lines++; + std::vector data; + split(line, ',', &data); + CHECK_EQ(data.size(), 2 * MAX_TURN_NUM + 3); + // load turn data + std::vector turns_tmp[MAX_TURN_NUM]; + for (int i = 0; i < MAX_TURN_NUM; ++i) { + split_to_int64(data[i], ' ', &turns_tmp[i]); + turns[i].push_back(std::move(turns_tmp[i])); + } + // load turn_mask data + std::vector turns_mask_tmp[MAX_TURN_NUM]; + for (int i = 0; i < MAX_TURN_NUM; ++i) { + split_to_float(data[MAX_TURN_NUM + i], ' ', &turns_mask_tmp[i]); + turns_mask[i].push_back(std::move(turns_mask_tmp[i])); + } + // load response data + std::vector response_tmp; + split_to_int64(data[2 * MAX_TURN_NUM], ' ', &response_tmp); + response.push_back(std::move(response_tmp)); + // load response_mask data + std::vector response_mask_tmp; + split_to_float(data[2 * MAX_TURN_NUM + 1], ' ', &response_mask_tmp); + response_mask.push_back(std::move(response_mask_tmp)); + // load result data + float result_tmp; + result_tmp = std::stof(data[2 * MAX_TURN_NUM + 2]); + result_data.push_back(result_tmp); + } + num_samples = num_lines; + } +}; + +void PrepareInputs(std::vector *input_slots, DataRecord *data, + int batch_size) { + PaddleTensor turns_tensor[MAX_TURN_NUM]; + PaddleTensor turns_mask_tensor[MAX_TURN_NUM]; + PaddleTensor response_tensor; + PaddleTensor response_mask_tensor; + std::string turn_pre = "turn_"; + std::string turn_mask_pre = "turn_mask_"; + + auto one_batch = data->NextBatch(); + int size = one_batch.response[0].size(); + CHECK_EQ(size, MAX_TURN_LEN); + // turn tensor assignment + for (int i = 0; i < MAX_TURN_NUM; ++i) { + turns_tensor[i].name = turn_pre + std::to_string(i); + turns_tensor[i].shape.assign({batch_size, size, 1}); + turns_tensor[i].dtype = PaddleDType::INT64; + TensorAssignData(&turns_tensor[i], one_batch.turns[i]); + } + // turn mask tensor assignment + for (int i = 0; i < MAX_TURN_NUM; ++i) { + turns_mask_tensor[i].name = turn_mask_pre + std::to_string(i); + turns_mask_tensor[i].shape.assign({batch_size, size, 1}); + turns_mask_tensor[i].dtype = PaddleDType::FLOAT32; + TensorAssignData(&turns_mask_tensor[i], one_batch.turns_mask[i]); + } + // response tensor assignment + response_tensor.name = "response"; + response_tensor.shape.assign({batch_size, size, 1}); + response_tensor.dtype = PaddleDType::INT64; + TensorAssignData(&response_tensor, one_batch.response); + // response mask tensor assignment + response_mask_tensor.name = "response_mask"; + response_mask_tensor.shape.assign({batch_size, size, 1}); + response_mask_tensor.dtype = PaddleDType::FLOAT32; + TensorAssignData(&response_mask_tensor, one_batch.response_mask); + + // Set inputs. 
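
The pushes that follow fill the slots in a fixed order, and that order has to line up with the model's feed names. A small sketch of the naming scheme PrepareInputs above assumes (turn_0..turn_8, then the matching masks, then response and response_mask, i.e. 2 * MAX_TURN_NUM + 2 = 20 slots per batch):

    #include <string>
    #include <vector>

    // Sketch: the feed names of the DAM model, in the order the tester
    // pushes its input slots.
    std::vector<std::string> DamFeedNames(int max_turn_num = 9) {
      std::vector<std::string> names;
      for (int i = 0; i < max_turn_num; ++i)
        names.push_back("turn_" + std::to_string(i));
      for (int i = 0; i < max_turn_num; ++i)
        names.push_back("turn_mask_" + std::to_string(i));
      names.push_back("response");
      names.push_back("response_mask");
      return names;  // 20 names for max_turn_num = 9
    }
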
+ for (int i = 0; i < MAX_TURN_NUM; ++i) { + input_slots->push_back(std::move(turns_tensor[i])); + } + for (int i = 0; i < MAX_TURN_NUM; ++i) { + input_slots->push_back(std::move(turns_mask_tensor[i])); + } + input_slots->push_back(std::move(response_tensor)); + input_slots->push_back(std::move(response_mask_tensor)); +} + +void SetConfig(contrib::AnalysisConfig *cfg) { + cfg->prog_file = FLAGS_infer_model + "/__model__"; + cfg->param_file = FLAGS_infer_model + "/param"; + cfg->use_gpu = false; + cfg->device = 0; + cfg->specify_input_name = true; + cfg->enable_ir_optim = true; +} + +void SetInput(std::vector> *inputs) { + DataRecord data(FLAGS_infer_data, FLAGS_batch_size); + std::vector input_slots; + int test_batch_num = + FLAGS_test_all_data ? data.num_samples / FLAGS_batch_size : 1; + LOG(INFO) << "The number of samples to be test: " + << test_batch_num * FLAGS_batch_size; + for (int bid = 0; bid < test_batch_num; ++bid) { + input_slots.clear(); + PrepareInputs(&input_slots, &data, FLAGS_batch_size); + (*inputs).emplace_back(input_slots); + } +} + +// Easy for profiling independently. +TEST(Analyzer_dam, profile) { + contrib::AnalysisConfig cfg; + SetConfig(&cfg); + + std::vector outputs; + std::vector> input_slots_all; + SetInput(&input_slots_all); + TestPrediction(cfg, input_slots_all, &outputs, FLAGS_num_threads); + + if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) { + PADDLE_ENFORCE_GT(outputs.size(), 0); + size_t size = GetSize(outputs[0]); + PADDLE_ENFORCE_GT(size, 0); + float *result = static_cast(outputs[0].data.data()); + for (size_t i = 0; i < size; i++) { + EXPECT_NEAR(result[i], result_data[i], 1e-3); + } + } +} + +// Check the fuse status +TEST(Analyzer_dam, fuse_statis) { + contrib::AnalysisConfig cfg; + SetConfig(&cfg); + + if (FLAGS_use_analysis) { + int num_ops; + auto predictor = CreatePaddlePredictor(cfg); + auto fuse_statis = GetFuseStatis( + static_cast(predictor.get()), &num_ops); + ASSERT_TRUE(fuse_statis.count("fc_fuse")); + EXPECT_EQ(fuse_statis.at("fc_fuse"), 317); + EXPECT_EQ(num_ops, 2020); + } +} + +// Compare result of NativeConfig and AnalysisConfig +TEST(Analyzer_dam, compare) { + contrib::AnalysisConfig cfg; + SetConfig(&cfg); + + std::vector> input_slots_all; + SetInput(&input_slots_all); + + if (FLAGS_use_analysis) { + CompareNativeAndAnalysis(cfg, input_slots_all); + } +} + +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc index 577b97e271aacab5d6740de7c8bc00bc87ae54dd..d91f7c314d0a936da6f5b0c41920c905af5cd0ee 100644 --- a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc @@ -20,7 +20,6 @@ using contrib::AnalysisConfig; struct DataRecord { std::vector> word_data_all, mention_data_all; - std::vector> rnn_word_datas, rnn_mention_datas; std::vector lod; // two inputs have the same lod info. 
size_t batch_iter{0}; size_t batch_size{1}; @@ -45,8 +44,6 @@ struct DataRecord { CHECK(!data.mention_data_all.empty()); CHECK_EQ(data.word_data_all.size(), data.mention_data_all.size()); for (size_t j = 0; j < data.word_data_all.size(); j++) { - data.rnn_word_datas.push_back(data.word_data_all[j]); - data.rnn_mention_datas.push_back(data.mention_data_all[j]); // calculate lod data.lod.push_back(data.lod.back() + data.word_data_all[j].size()); } @@ -87,8 +84,8 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data, lod_mention_tensor.shape.assign({size, 1}); lod_mention_tensor.lod.assign({one_batch.lod}); // assign data - TensorAssignData(&lod_word_tensor, one_batch.rnn_word_datas); - TensorAssignData(&lod_mention_tensor, one_batch.rnn_mention_datas); + TensorAssignData(&lod_word_tensor, one_batch.word_data_all); + TensorAssignData(&lod_mention_tensor, one_batch.mention_data_all); // Set inputs. input_slots->assign({lod_word_tensor, lod_mention_tensor}); for (auto &tensor : *input_slots) { diff --git a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc index c2151eea0823f80feb17b014c1f739d2a15ae862..e5c8dfd22a006d5271248c5b083aab2c22417502 100644 --- a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc @@ -30,25 +30,7 @@ void SetConfig(AnalysisConfig *cfg) { } void SetInput(std::vector> *inputs) { - PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data."); - - PaddleTensor input; - // channel=3, height/width=318 - std::vector shape({FLAGS_batch_size, 3, 318, 318}); - input.shape = shape; - input.dtype = PaddleDType::FLOAT32; - - // fill input data, for profile easily, do not use random data here. - size_t size = FLAGS_batch_size * 3 * 318 * 318; - input.data.Resize(size * sizeof(float)); - float *input_data = static_cast(input.data.data()); - for (size_t i = 0; i < size; i++) { - *(input_data + i) = static_cast(i) / size; - } - - std::vector input_slots; - input_slots.assign({input}); - (*inputs).emplace_back(input_slots); + SetFakeImageInput(inputs, FLAGS_infer_model); } // Easy for profiling independently. 
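
The NER tester above now feeds word_data_all and mention_data_all directly and rebuilds the LoD from the per-sequence lengths as it goes. That LoD is a vector of cumulative offsets starting at 0, so sequence j occupies rows [lod[j], lod[j+1]) of the flattened tensor. A sketch of the bookkeeping, assuming int64 token sequences:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Build cumulative LoD offsets from variable-length sequences.
    std::vector<size_t> BuildLod(const std::vector<std::vector<int64_t>> &seqs) {
      std::vector<size_t> lod{0};
      for (const auto &seq : seqs) {
        lod.push_back(lod.back() + seq.size());
      }
      return lod;  // e.g. lengths {3, 2, 4} -> offsets {0, 3, 5, 9}
    }
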
@@ -61,13 +43,6 @@ void profile(bool use_mkldnn = false) { std::vector> input_slots_all; SetInput(&input_slots_all); TestPrediction(cfg, input_slots_all, &outputs, FLAGS_num_threads); - - if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) { - PADDLE_ENFORCE_EQ(outputs.size(), 1UL); - size_t size = GetSize(outputs[0]); - // output is a 512-dimension feature - EXPECT_EQ(size, 512 * FLAGS_batch_size); - } } TEST(Analyzer_resnet50, profile) { profile(); } @@ -83,8 +58,7 @@ TEST(Analyzer_resnet50, fuse_statis) { auto predictor = CreatePaddlePredictor(cfg); auto fuse_statis = GetFuseStatis( static_cast(predictor.get()), &num_ops); - ASSERT_TRUE(fuse_statis.count("fc_fuse")); - EXPECT_EQ(fuse_statis.at("fc_fuse"), 1); + LOG(INFO) << "num_ops: " << num_ops; } // Compare result of NativeConfig and AnalysisConfig diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index 19c3f532d5dcb7588793fa21fa179f6b48649103..8c5888d8da7b33eeca77311c10dd818648e8e524 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -25,6 +25,7 @@ #include "paddle/fluid/inference/api/analysis_predictor.h" #include "paddle/fluid/inference/api/helper.h" #include "paddle/fluid/inference/api/paddle_inference_pass.h" +#include "paddle/fluid/inference/tests/test_helper.h" #include "paddle/fluid/platform/profiler.h" DEFINE_string(infer_model, "", "model path"); @@ -105,6 +106,34 @@ std::unordered_map GetFuseStatis(PaddlePredictor *predictor, return fuse_statis; } +void SetFakeImageInput(std::vector> *inputs, + const std::string &dirname) { + // Set fake_image_data + PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data."); + std::vector> feed_target_shapes = + GetFeedTargetShapes(dirname, true, "model", "params"); + int dim1 = feed_target_shapes[0][1]; + int dim2 = feed_target_shapes[0][2]; + int dim3 = feed_target_shapes[0][3]; + + PaddleTensor input; + std::vector shape({FLAGS_batch_size, dim1, dim2, dim3}); + input.shape = shape; + input.dtype = PaddleDType::FLOAT32; + + // fill input data, for profile easily, do not use random data here. 
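
SetFakeImageInput above reads the feed-target shape from the model itself, so the resnet50 tester no longer hard-codes 3 x 318 x 318, and it fills the buffer with the deterministic ramp i/size so repeated profiling runs see identical inputs. A sketch of both steps, assuming a feed shape of the form [-1, C, H, W] with the batch dimension unspecified:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Substitute the requested batch size for the unknown leading dimension,
    // then fill the buffer with a deterministic ramp (stable inputs keep
    // profile timings comparable across runs).
    std::vector<float> FakeImage(const std::vector<int64_t> &feed_shape,
                                 int batch_size) {
      size_t size = static_cast<size_t>(batch_size);
      for (size_t d = 1; d < feed_shape.size(); ++d) size *= feed_shape[d];
      std::vector<float> data(size);
      for (size_t i = 0; i < size; ++i) {
        data[i] = static_cast<float>(i) / size;
      }
      return data;
    }
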
+ size_t size = FLAGS_batch_size * dim1 * dim2 * dim3; + input.data.Resize(size * sizeof(float)); + float *input_data = static_cast(input.data.data()); + for (size_t i = 0; i < size; i++) { + *(input_data + i) = static_cast(i) / size; + } + + std::vector input_slots; + input_slots.assign({input}); + (*inputs).emplace_back(input_slots); +} + void TestOneThreadPrediction( const AnalysisConfig &config, const std::vector> &inputs, diff --git a/paddle/fluid/inference/tests/api/trt_models_tester.cc b/paddle/fluid/inference/tests/api/trt_models_tester.cc index 91111f2af56065bbf57ba3a41bddd55ecced1060..75840a9c437d956da4f542a38b2532ea20ee96c5 100644 --- a/paddle/fluid/inference/tests/api/trt_models_tester.cc +++ b/paddle/fluid/inference/tests/api/trt_models_tester.cc @@ -93,11 +93,16 @@ void CompareTensorRTWithFluid(int batch_size, std::string model_dirname) { } } -TEST(trt_models_test, main) { - std::vector infer_models = {"mobilenet", "resnet50", - "resnext50"}; - for (auto &model_dir : infer_models) { - CompareTensorRTWithFluid(1, FLAGS_dirname + "/" + model_dir); - } +TEST(trt_models_test, mobilenet) { + CompareTensorRTWithFluid(1, FLAGS_dirname + "/mobilenet"); +} + +TEST(trt_models_test, resnet50) { + CompareTensorRTWithFluid(1, FLAGS_dirname + "/resnet50"); } + +TEST(trt_models_test, resnext50) { + CompareTensorRTWithFluid(1, FLAGS_dirname + "/resnext50"); +} + } // namespace paddle diff --git a/paddle/fluid/inference/tests/test.cmake b/paddle/fluid/inference/tests/test.cmake new file mode 100644 index 0000000000000000000000000000000000000000..ab3a30ce6bba14a7d5ec700a159d90031e6b5dc7 --- /dev/null +++ b/paddle/fluid/inference/tests/test.cmake @@ -0,0 +1,31 @@ +set(INFERENCE_URL "http://paddle-inference-dist.cdn.bcebos.com" CACHE STRING "inference download url") +set(INFERENCE_DEMO_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo" CACHE STRING + "A path setting inference demo download directories.") +function (inference_download install_dir url filename) + message(STATUS "Download inference test stuff from ${url}/${filename}") + execute_process(COMMAND bash -c "mkdir -p ${install_dir}") + execute_process(COMMAND bash -c "cd ${install_dir} && wget -q ${url}/${filename}") + message(STATUS "finish downloading ${filename}") +endfunction() + +function (inference_download_and_uncompress install_dir url filename) + inference_download(${install_dir} ${url} ${filename}) + execute_process(COMMAND bash -c "cd ${install_dir} && tar xzf ${filename}") +endfunction() + +set(WORD2VEC_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/word2vec") +if (NOT EXISTS ${WORD2VEC_INSTALL_DIR}) + inference_download_and_uncompress(${WORD2VEC_INSTALL_DIR} ${INFERENCE_URL} "word2vec.inference.model.tar.gz") +endif() +set(WORD2VEC_MODEL_DIR "${WORD2VEC_INSTALL_DIR}/word2vec.inference.model") + +function (inference_base_test TARGET) + set(options "") + set(oneValueArgs "") + set(multiValueArgs SRCS ARGS DEPS) + cmake_parse_arguments(base_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + if(WITH_GPU) + set(mem_opt "--fraction_of_gpu_memory_to_use=0.5") + endif() + cc_test(${TARGET} SRCS ${base_test_SRCS} DEPS ${base_test_DEPS} ARGS ${mem_opt} ${base_test_ARGS}) +endfunction() diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index 94f0550df57e79fa68c135f5c9c4b7effe6ac156..2118fcfd4bb1589947617e462f09971fcc090b98 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -18,7 +18,6 @@ limitations under the License. 
*/ #include #include -#include "paddle/fluid/framework/ir/graph_to_program_pass.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/inference/io.h" #include "paddle/fluid/platform/profiler.h" @@ -94,15 +93,15 @@ void CheckError(const paddle::framework::LoDTensor& output1, std::unique_ptr InitProgram( paddle::framework::Executor* executor, paddle::framework::Scope* scope, - const std::string& dirname, const bool is_combined = false) { + const std::string& dirname, const bool is_combined = false, + const std::string& prog_filename = "__model_combined__", + const std::string& param_filename = "__params_combined__") { std::unique_ptr inference_program; if (is_combined) { // All parameters are saved in a single file. // Hard-coding the file names of program and parameters in unittest. // The file names should be consistent with that used in Python API // `fluid.io.save_inference_model`. - std::string prog_filename = "__model_combined__"; - std::string param_filename = "__params_combined__"; inference_program = paddle::inference::Load(executor, scope, dirname + "/" + prog_filename, dirname + "/" + param_filename); @@ -115,12 +114,15 @@ std::unique_ptr InitProgram( } std::vector> GetFeedTargetShapes( - const std::string& dirname, const bool is_combined = false) { + const std::string& dirname, const bool is_combined = false, + const std::string& prog_filename = "__model_combined__", + const std::string& param_filename = "__params_combined__") { auto place = paddle::platform::CPUPlace(); auto executor = paddle::framework::Executor(place); auto* scope = new paddle::framework::Scope(); - auto inference_program = InitProgram(&executor, scope, dirname, is_combined); + auto inference_program = InitProgram(&executor, scope, dirname, is_combined, + prog_filename, param_filename); auto& global_block = inference_program->Block(0); const std::vector& feed_target_names = @@ -136,15 +138,6 @@ std::vector> GetFeedTargetShapes( return feed_target_shapes; } -void Compile(paddle::framework::ProgramDesc* program) { - std::unique_ptr g( - new paddle::framework::ir::Graph(*program)); - auto pass = paddle::framework::ir::PassRegistry::Instance().Get( - "graph_to_program_pass"); - pass->SetNotOwned("program", program); - pass->Apply(std::move(g)); -} - template void TestInference(const std::string& dirname, const std::vector& cpu_feeds, @@ -182,7 +175,6 @@ void TestInference(const std::string& dirname, paddle::platform::DeviceContextPool::Instance().Get(place)); inference_program = InitProgram(&executor, scope, dirname, is_combined); } - Compile(inference_program.get()); // Disable the profiler and print the timing information paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kDefault, @@ -261,5 +253,3 @@ void TestInference(const std::string& dirname, delete scope; } - -USE_PASS(graph_to_program_pass); diff --git a/paddle/fluid/operators/activation_op.cu b/paddle/fluid/operators/activation_op.cu index 27487b396ccf63d962defa6b270063ccb409164e..d3a7ceed466a9b5e4d773f1531d198adff97eac2 100644 --- a/paddle/fluid/operators/activation_op.cu +++ b/paddle/fluid/operators/activation_op.cu @@ -26,6 +26,8 @@ namespace plat = paddle::platform; act_type##_grad, ops::ActivationGradKernel>, \ ops::ActivationGradKernel>); + ops::grad_functor>, \ + ops::ActivationGradKernel>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CUDA_KERNEL); diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index 
2e31d1c9c708225135e27c93ba94722794c4b282..0747469e0f4c4fe6a323a499c720a54d1e278e09 100644 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -333,8 +333,7 @@ struct SqrtGradFunctor : public BaseActivationFunctor { template void operator()(Device d, X x, Out out, dOut dout, dX dx) const { - const Out out_conj = Eigen::numext::conj(out); - dx.device(d) = static_cast(0.5) * dout / out_conj; + dx.device(d) = static_cast(0.5) * dout / out; } }; @@ -740,7 +739,7 @@ struct PowGradFunctor : public BaseActivationFunctor { typename dX> void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = dout * static_cast(factor) * - x.pow(static_cast(factor - static_cast(1))); + x.pow(static_cast(factor) - static_cast(1)); } }; diff --git a/paddle/fluid/operators/adagrad_op.cc b/paddle/fluid/operators/adagrad_op.cc index a3ef9ad9f91f1f626bd33876693ecc17ad76b96b..c88297ff544ddb0e5a97452a8ad2e8f9f77825ba 100644 --- a/paddle/fluid/operators/adagrad_op.cc +++ b/paddle/fluid/operators/adagrad_op.cc @@ -119,8 +119,8 @@ struct SparseAdagradFunctor { auto* grad_merge_data = grad_merge.mutable_value()->template data(); // 2. m += g_m * g_m - math::scatter::Mul sqare_func; - auto grad_square = sqare_func(context, grad_merge, grad_merge); + auto grad_square = + SquareSelectedRows(context, grad_merge); math::SelectedRowsAddToTensor functor; functor(context, grad_square, moment); diff --git a/paddle/fluid/operators/adagrad_op.cu b/paddle/fluid/operators/adagrad_op.cu index b25268786d622bc7a94117849763833e528bef48..b99b33343d36fbb7f6b1a2928e142ca615b238b3 100644 --- a/paddle/fluid/operators/adagrad_op.cu +++ b/paddle/fluid/operators/adagrad_op.cu @@ -84,8 +84,8 @@ struct SparseAdagradFunctor { auto* grad_merge_data = grad_merge.mutable_value()->template data(); framework::Vector merge_rows(grad_merge.rows()); // 2. m += g_m * g_m - math::scatter::Mul sqare_func; - auto grad_square = sqare_func(context, grad_merge, grad_merge); + auto grad_square = + SquareSelectedRows(context, grad_merge); math::SelectedRowsAddToTensor functor; functor(context, grad_square, moment); diff --git a/paddle/fluid/operators/adagrad_op.h b/paddle/fluid/operators/adagrad_op.h index 0a16ce00f71586ef55007c3753e024be29d0ed56..9f6ef391696aa8718be71ae945e746b876813d94 100644 --- a/paddle/fluid/operators/adagrad_op.h +++ b/paddle/fluid/operators/adagrad_op.h @@ -28,6 +28,20 @@ struct SparseAdagradFunctor { framework::Tensor *moment, framework::Tensor *param); }; +template +framework::SelectedRows SquareSelectedRows( + const DeviceContext &context, const framework::SelectedRows &input) { + framework::SelectedRows out; + out.set_rows(input.rows()); + out.set_height(input.height()); + out.mutable_value()->mutable_data(input.value().dims(), + context.GetPlace()); + auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); + auto e_in = framework::EigenVector::Flatten(input.value()); + e_out.device(*context.eigen_device()) = e_in.square(); + return out; +} + template class AdagradOpKernel : public framework::OpKernel { public: diff --git a/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc b/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc new file mode 100644 index 0000000000000000000000000000000000000000..ed71594ba5781590f3291d56c4ba1a4443003bd5 --- /dev/null +++ b/paddle/fluid/operators/affine_grid_cudnn_op.cu.cc @@ -0,0 +1,112 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/cudnn_helper.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using ScopedSpatialTransformerDescriptor = + platform::ScopedSpatialTransformerDescriptor; + +template +class CUDNNAffineGridOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace."); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); + auto* theta = ctx.Input("Theta"); + auto* output = ctx.Output("Output"); + const T* theta_data = theta->data(); + + int n = theta->dims()[0]; + auto size_attr = ctx.Attr>("output_shape"); + Tensor h_sizes; + int* h_size_data; + if (size_attr.size() == 0) { + auto* output_shape = ctx.Input("OutputShape"); + framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); + h_size_data = h_sizes.data(); + } else { + h_size_data = h_sizes.mutable_data({4}, platform::CPUPlace()); + h_size_data[0] = n; + h_size_data[1] = size_attr[1]; + h_size_data[2] = size_attr[2]; + h_size_data[3] = size_attr[3]; + } + + T* output_data = output->mutable_data( + {n, h_size_data[2], h_size_data[3], 2}, ctx.GetPlace()); + ScopedSpatialTransformerDescriptor st_desc; + cudnnSpatialTransformerDescriptor_t cudnn_st_desc = + st_desc.descriptor(4, h_size_data); + + PADDLE_ENFORCE(platform::dynload::cudnnSpatialTfGridGeneratorForward( + handle, cudnn_st_desc, theta_data, output_data)); + } +}; + +template +class CUDNNAffineGridGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace."); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); + auto output_grad = ctx.Input(framework::GradVarName("Output")); + auto theta_grad = ctx.Output(framework::GradVarName("Theta")); + + int n = output_grad->dims()[0]; + auto size_attr = ctx.Attr>("output_shape"); + Tensor h_sizes; + int* h_size_data; + if (size_attr.size() == 0) { + auto* output_shape = ctx.Input("OutputShape"); + framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); + h_size_data = h_sizes.data(); + } else { + h_size_data = h_sizes.mutable_data({4}, platform::CPUPlace()); + h_size_data[0] = n; + h_size_data[1] = size_attr[1]; + h_size_data[2] = size_attr[2]; + h_size_data[3] = size_attr[3]; + } + + ScopedSpatialTransformerDescriptor st_desc; + cudnnSpatialTransformerDescriptor_t cudnn_st_desc = + st_desc.descriptor(4, h_size_data); + + const T* output_grad_data = output_grad->data(); + T* theta_grad_data = theta_grad->mutable_data(ctx.GetPlace()); + + PADDLE_ENFORCE(platform::dynload::cudnnSpatialTfGridGeneratorBackward( + handle, cudnn_st_desc, output_grad_data, theta_grad_data)); + } +}; + +} // namespace operators +} // namespace paddle 
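
At its core, the spatial-transformer grid generator these cuDNN kernels wrap maps each output pixel's normalized coordinates (x, y), both in [-1, 1], through the 2x3 matrix Theta to get the source sampling location — the same $$Output[i] = C_ * Theta[i]^T$$ that the CPU path documents below. A scalar sketch of the per-point math:

    #include <array>

    // One grid point: (x, y, 1) mapped through the 2x3 affine matrix theta
    // yields the normalized source location to sample the input feature map.
    std::array<float, 2> GridPoint(const float theta[2][3], float x, float y) {
      return {theta[0][0] * x + theta[0][1] * y + theta[0][2],
              theta[1][0] * x + theta[1][1] * y + theta[1][2]};
    }
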
+ +namespace plat = paddle::platform; +REGISTER_OP_KERNEL(affine_grid, CUDNN, plat::CUDAPlace, + paddle::operators::CUDNNAffineGridOpKernel, + paddle::operators::CUDNNAffineGridOpKernel); +REGISTER_OP_KERNEL(affine_grid_grad, CUDNN, plat::CUDAPlace, + paddle::operators::CUDNNAffineGridGradOpKernel, + paddle::operators::CUDNNAffineGridGradOpKernel); diff --git a/paddle/fluid/operators/affine_grid_op.cc b/paddle/fluid/operators/affine_grid_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..6f7da445fc84fc1f14b01a633af0e886aec6f8ed --- /dev/null +++ b/paddle/fluid/operators/affine_grid_op.cc @@ -0,0 +1,231 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/affine_grid_op.h" +#include +#include "paddle/fluid/framework/op_registry.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/cudnn_helper.h" +#endif + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +struct Linspace { + void operator()(T start, T end, int count, framework::Tensor* numbers, + const framework::ExecutionContext& ctx) { + T* number_data = numbers->mutable_data({count}, platform::CPUPlace()); + T slice = (end - start) / (T)(count - 1); + for (int i = 0; i < count; ++i) { + number_data[i] = start + (T)i * slice; + } + } +}; + +class AffineGridOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Theta"), + "Input(Theta) of AffineGridOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Output"), + "Output(Output) of AffineGridOp should not be null."); + auto theta_dims = ctx->GetInputDim("Theta"); + PADDLE_ENFORCE(theta_dims.size() == 3, + "AffineGrid's Input(Theta) should be 3-D tensor."); + + auto output_shape = ctx->Attrs().Get>("output_shape"); + if (output_shape.size() == 0) { + PADDLE_ENFORCE(ctx->HasInput("OutputShape"), + "Input(OutputShape) of AffineGridOp should not be null if " + "attr(output_shape) is not configured."); + auto output_shape_dims = ctx->GetInputDim("OutputShape"); + PADDLE_ENFORCE(output_shape_dims.size() == 1, + "AffineGrid's Input(OutputShape) should be 1-D tensor."); + } else { + PADDLE_ENFORCE(output_shape.size() == 4, + "The size of attr(output_shape) should be 4."); + } + + PADDLE_ENFORCE(theta_dims[1] == 2, "Input(theta) dims[1] should be 2."); + PADDLE_ENFORCE(theta_dims[2] == 3, "Input(theta) dims[2] should be 3."); + // N * H * W * 2 + ctx->SetOutputDim("Output", + framework::make_ddim({theta_dims[0], -1, -1, 2})); + ctx->ShareLoD("Theta", "Output"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::LibraryType library{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library = framework::LibraryType::kCUDNN; + } +#endif + auto data_type = 
        framework::ToDataType(ctx.Input<Tensor>("Theta")->type());
+    return framework::OpKernelType(data_type, ctx.GetPlace(),
+                                   framework::DataLayout::kAnyLayout, library);
+  }
+};
+
+class AffineGridOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput(
+        "Theta",
+        "(Tensor) A batch of affine transform parameters with shape [N, 2, 3]. "
+        "It is used to transform coordinate (x_0, y_0) to coordinate (x_1, "
+        "y_1).");
+    AddInput("OutputShape",
+             "(Tensor) The shape of target image with format [N, C, H, W].")
+        .AsDispensable();
+    AddOutput("Output", "(Tensor) Output Tensor with shape [N, H, W, 2].");
+    AddAttr<bool>(
+        "use_cudnn",
+        "(bool, default true) Only used in cudnn kernel, need install cudnn")
+        .SetDefault(true);
+    AddAttr<std::vector<int>>(
+        "output_shape",
+        "The target output image shape with format [N, C, H, W].")
+        .SetDefault(std::vector<int>());
+
+    AddComment(R"DOC(
+    It generates a grid of (x,y) coordinates using the parameters of the
+    affine transformation that correspond to a set of points where the input
+    feature map should be sampled to produce the transformed output feature map.
+
+    Given:
+        Theta = [[[x_11, x_12, x_13]
+                  [x_14, x_15, x_16]]
+                 [[x_21, x_22, x_23]
+                  [x_24, x_25, x_26]]]
+
+        OutputShape = [2, 3, 5, 5]
+
+    Step 1:
+
+        Generate relative coordinates according to OutputShape.
+        The values of relative coordinates are in the interval between -1 and 1.
+        The shape of the relative coordinates is [2, H, W] as below:
+
+        C = [[[-1.  -1.  -1.  -1.  -1. ]
+              [-0.5 -0.5 -0.5 -0.5 -0.5]
+              [ 0.   0.   0.   0.   0. ]
+              [ 0.5  0.5  0.5  0.5  0.5]
+              [ 1.   1.   1.   1.   1. ]]
+             [[-1.  -0.5  0.   0.5  1. ]
+              [-1.  -0.5  0.   0.5  1. ]
+              [-1.  -0.5  0.   0.5  1. ]
+              [-1.  -0.5  0.   0.5  1. ]
+              [-1.  -0.5  0.   0.5  1. ]]]
+        C[0] is the coordinates in height axis and C[1] is the coordinates in
+        width axis.
+
+    Step 2:
+        Transpose and reshape C to shape [H * W, 2] and append ones to the
+        last dimension. Then we get:
+        C_ = [[-1.  -1.   1. ]
+              [-0.5 -1.   1. ]
+              [ 0.  -1.   1. ]
+              [ 0.5 -1.   1. ]
+              [ 1.  -1.   1. ]
+              [-1.  -0.5  1. ]
+              [-0.5 -0.5  1. ]
+              [ 0.  -0.5  1. ]
+              [ 0.5 -0.5  1. ]
+              [ 1.  -0.5  1. ]
+              [-1.   0.   1. ]
+              [-0.5  0.   1. ]
+              [ 0.   0.   1. ]
+              [ 0.5  0.   1. ]
+              [ 1.   0.   1. ]
+              [-1.   0.5  1. ]
+              [-0.5  0.5  1. ]
+              [ 0.   0.5  1. ]
+              [ 0.5  0.5  1. ]
+              [ 1.   0.5  1. ]
+              [-1.   1.   1. ]
+              [-0.5  1.   1. ]
+              [ 0.   1.   1. ]
+              [ 0.5  1.   1. ]
+              [ 1.   1.   1.
]] + Step3: + Compute output by equation $$Output[i] = C_ * Theta[i]^T$$ + )DOC"); + } +}; + +class AffineGridOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + auto theta_dims = ctx->GetInputDim("Theta"); + if (ctx->HasOutput(framework::GradVarName("Theta"))) { + ctx->SetOutputDim(framework::GradVarName("Theta"), theta_dims); + } + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kCUDNN; + } +#endif + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Theta")->type()), + ctx.GetPlace(), framework::DataLayout::kAnyLayout, library_); + } +}; + +class AffineGridGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* op = new framework::OpDesc(); + op->SetType("affine_grid_grad"); + op->SetInput("Theta", Input("Theta")); + op->SetInput("OutputShape", Input("OutputShape")); + op->SetInput(framework::GradVarName("Output"), OutputGrad("Output")); + + op->SetAttrMap(Attrs()); + + op->SetOutput(framework::GradVarName("Theta"), InputGrad("Theta")); + return std::unique_ptr(op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(affine_grid, ops::AffineGridOp, ops::AffineGridOpMaker, + ops::AffineGridGradMaker); +REGISTER_OPERATOR(affine_grid_grad, ops::AffineGridOpGrad); + +REGISTER_OP_CPU_KERNEL( + affine_grid, + ops::AffineGridOpKernel, + ops::AffineGridOpKernel); +REGISTER_OP_CPU_KERNEL( + affine_grid_grad, + ops::AffineGridGradOpKernel, + ops::AffineGridGradOpKernel); diff --git a/paddle/fluid/operators/affine_grid_op.h b/paddle/fluid/operators/affine_grid_op.h new file mode 100644 index 0000000000000000000000000000000000000000..87d23831486e658374d4c011412fdef57be1b994 --- /dev/null +++ b/paddle/fluid/operators/affine_grid_op.h @@ -0,0 +1,174 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenTensor = framework::EigenTensor; + +using Array1 = Eigen::DSizes; +using Array2 = Eigen::DSizes; +using Array3 = Eigen::DSizes; +using Array4 = Eigen::DSizes; + +/** + *Return a tensor with evenly spaced numbers over a specified interval. 
+ */ +template +struct Linspace { + void operator()(T start, T end, int count, framework::Tensor* numbers, + const framework::ExecutionContext& ctx); +}; + +template +inline void GetIdxMap(int n, int h, int w, Tensor* grid, + const framework::ExecutionContext& ctx) { + auto& place = *ctx.template device_context().eigen_device(); + grid->mutable_data({n, h, w, 3}, ctx.GetPlace()); + auto grid_t = EigenTensor::From(*grid); + // Get indexes of height with shape [height, width, 1] + Tensor h_idx; + Linspace linspace; + linspace((T)-1, (T)1, h, &h_idx, ctx); + auto h_idx_t = EigenTensor::From(h_idx); + // Get indexes of width with shape [height, width, 1] + Tensor w_idx; + linspace((T)-1, (T)1, w, &w_idx, ctx); + auto w_idx_t = EigenTensor::From(w_idx); + // Get constant ones tensor with shape [height, width, 1] + Tensor ones; + ones.mutable_data({h, w, 1}, ctx.GetPlace()); + auto ones_t = EigenTensor::From(ones).setConstant((T)1); + // Get grid tensor with shape [n, h, w, 3] by concatenating h_idx, w_idx and + // ones + Tensor w_idx_map; + w_idx_map.mutable_data({h, w, 1}, ctx.GetPlace()); + auto w_idx_map_t = EigenTensor::From(w_idx_map); + Tensor h_idx_map; + h_idx_map.mutable_data({h, w, 1}, ctx.GetPlace()); + auto h_idx_map_t = EigenTensor::From(h_idx_map); + Tensor w_h_idx_map; + w_h_idx_map.mutable_data({h, w, 2}, ctx.GetPlace()); + auto w_h_idx_map_t = EigenTensor::From(w_h_idx_map); + Tensor w_h_one_idx_map; + w_h_one_idx_map.mutable_data({h, w, 3}, ctx.GetPlace()); + auto w_h_one_idx_map_t = EigenTensor::From(w_h_one_idx_map); + + w_idx_map_t.device(place) = w_idx_t.reshape(Array2(1, w)) + .broadcast(Array2(h, 1)) + .reshape(Array3(h, w, 1)); + + h_idx_map_t.device(place) = h_idx_t.reshape(Array2(1, h)) + .broadcast(Array2(w, 1)) + .shuffle(Array2(1, 0)) + .reshape(Array3(h, w, 1)); + + w_h_idx_map_t.device(place) = w_idx_map_t.concatenate(h_idx_map_t, 2); + w_h_one_idx_map_t.device(place) = w_h_idx_map_t.concatenate(ones_t, 2); + grid_t.device(place) = w_h_one_idx_map_t.reshape(Array4(1, h, w, 3)) + .broadcast(Array4(n, 1, 1, 1)); +} + +template +class AffineGridOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* theta = ctx.Input("Theta"); + int n = theta->dims()[0]; + auto size_attr = ctx.Attr>("output_shape"); + int h = 0; + int w = 0; + if (size_attr.size() == 0) { + auto* output_shape = ctx.Input("OutputShape"); + Tensor h_sizes; + framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); + const int* h_size_data = h_sizes.data(); + h = h_size_data[2]; + w = h_size_data[3]; + } else { + h = size_attr[2]; + w = size_attr[3]; + } + auto* output = ctx.Output("Output"); + output->mutable_data({n, h, w, 2}, ctx.GetPlace()); + math::SetConstant()( + ctx.template device_context(), output, + static_cast(0)); + Tensor grid; + GetIdxMap(n, h, w, &grid, ctx); + // output = grid * theta.T + // TODO(wanghaoshuang): Refine batched matrix multiply + auto blas = math::GetBlas(ctx); + for (int i = 0; i < n; ++i) { + Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3}); + Tensor sliced_theta = theta->Slice(i, i + 1).Resize({2, 3}); + Tensor sliced_out = output->Slice(i, i + 1).Resize({h * w, 2}); + blas.MatMul(sliced_grid, false, sliced_theta, true, T(1), &sliced_out, + T(0)); + } + } +}; + +template +class AffineGridGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto output_grad = 
+ +template +class AffineGridGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto output_grad = ctx.Input(framework::GradVarName("Output")); + auto theta_grad = ctx.Output(framework::GradVarName("Theta")); + int n = output_grad->dims()[0]; + auto size_attr = ctx.Attr>("output_shape"); + int h = 0; + int w = 0; + if (size_attr.size() == 0) { + auto* output_shape = ctx.Input("OutputShape"); + Tensor h_sizes; + framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); + const int* h_size_data = h_sizes.data(); + h = h_size_data[2]; + w = h_size_data[3]; + } else { + h = size_attr[2]; + w = size_attr[3]; + } + theta_grad->mutable_data({n, 2, 3}, ctx.GetPlace()); + math::SetConstant()( + ctx.template device_context(), theta_grad, + static_cast(0)); + Tensor grid; + GetIdxMap(n, h, w, &grid, ctx); + // theta_grad = output_grad.T * grid + // TODO(wanghaoshuang): Refine batched matrix multiply + auto blas = math::GetBlas(ctx); + for (int i = 0; i < n; ++i) { + Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3}); + Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize({h * w, 2}); + Tensor sliced_theta_grad = theta_grad->Slice(i, i + 1).Resize({2, 3}); + blas.MatMul(sliced_out_grad, true, sliced_grid, false, T(1), + &sliced_theta_grad, T(0)); + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc index ca6cd8669352fd5814f25a04433ca97fe4abe9ff..aaed335c905c0d80cd519afc5fecb06af73fcfe7 100644 --- a/paddle/fluid/operators/batch_norm_op.cu.cc +++ b/paddle/fluid/operators/batch_norm_op.cu.cc @@ -219,8 +219,8 @@ class BatchNormGradKernel auto *d_bias = ctx.Output(framework::GradVarName("Bias")); d_x->mutable_data(ctx.GetPlace()); - d_scale->mutable_data(ctx.GetPlace()); - d_bias->mutable_data(ctx.GetPlace()); + d_scale->mutable_data>(ctx.GetPlace()); + d_bias->mutable_data>(ctx.GetPlace()); auto &dev_ctx = ctx.template device_context(); if ((N * H * W * D) == 1) { @@ -272,8 +272,10 @@ class BatchNormGradKernel const auto *saved_mean = ctx.Input("SavedMean"); const auto *saved_var = ctx.Input("SavedVariance"); - const void *saved_mean_data = saved_mean->template data(); - const void *saved_var_data = saved_var->template data(); + const void *saved_mean_data = + saved_mean->template data>(); + const void *saved_var_data = + saved_var->template data>(); CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), mode_, CudnnDataType::kOne(), @@ -281,10 +283,10 @@ class BatchNormGradKernel CudnnDataType::kZero(), data_desc_, x->template data(), data_desc_, d_y->template data(), data_desc_, d_x->template mutable_data(ctx.GetPlace()), bn_param_desc_, - scale->template data(), - d_scale->template mutable_data(ctx.GetPlace()), - d_bias->template mutable_data(ctx.GetPlace()), epsilon, - saved_mean_data, saved_var_data)); + scale->template data>(), + d_scale->template mutable_data>(ctx.GetPlace()), + d_bias->template mutable_data>(ctx.GetPlace()), + epsilon, saved_mean_data, saved_var_data)); // clean when exit.
CUDNN_ENFORCE(platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); @@ -304,4 +306,5 @@ REGISTER_OP_CUDA_KERNEL( ops::BatchNormKernel); REGISTER_OP_CUDA_KERNEL( batch_norm_grad, ops::BatchNormGradKernel, - ops::BatchNormGradKernel); + ops::BatchNormGradKernel, + ops::BatchNormGradKernel); diff --git a/paddle/fluid/operators/checkpoint_notify_op.cc b/paddle/fluid/operators/checkpoint_notify_op.cc index 3a2527e407bb179c4873fa3ffe2e8f22fb47faf7..7c072cb071a5d1e6a0549cf6d9eff18fd2533edc 100644 --- a/paddle/fluid/operators/checkpoint_notify_op.cc +++ b/paddle/fluid/operators/checkpoint_notify_op.cc @@ -38,9 +38,10 @@ class CheckpointNotifyOp : public framework::OperatorBase { std::vector epmap = Attr>("epmap"); std::string dir = Attr("dir"); std::string lookup_table_name = Attr("lookup_table"); + int trainer_id = Attr("trainer_id"); distributed::RPCClient* rpc_client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance(trainer_id); for (size_t i = 0; i < epmap.size(); i++) { auto lookup_table_save_dir = string::Sprintf("%s/%s_%d", dir, lookup_table_name, i); @@ -63,6 +64,7 @@ class CheckpointNotifyOpMaker : public framework::OpProtoAndCheckerMaker { "dir", "(string, default '') indicate the folder checkpoint will use"); AddAttr("lookup_table", "(string, default '') the lookup table name"); + AddAttr("trainer_id", "trainer id from 0 ~ worker_num.").SetDefault(0); AddComment(R"DOC( CheckpointNotify operator diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index 4a7a6bcf7154d5680de751e3c933be46fb09fd74..76eda51ad414030074b69ee8d4f796c5c32d12f3 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -143,9 +143,11 @@ class CUDNNConvOpKernel : public framework::OpKernel { cudnn_conv_desc, CUDNN_TENSOR_OP_MATH)); // Currently tensor core is only enabled using this algo algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; + VLOG(5) << "use cudnn_tensor_op_math"; } else { CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( cudnn_conv_desc, CUDNN_DEFAULT_MATH)); + VLOG(5) << "NOT use cudnn_tensor_op_math"; } #endif @@ -160,6 +162,7 @@ class CUDNNConvOpKernel : public framework::OpKernel { // ------------------- cudnn conv forward --------------------- ScalingParamType alpha = 1.0f, beta = 0.0f; + auto workspace_handle = dev_ctx.cudnn_workspace_handle(); for (int i = 0; i < groups; i++) { auto cudnn_func = [&](void* cudnn_workspace) { CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( @@ -168,7 +171,7 @@ class CUDNNConvOpKernel : public framework::OpKernel { cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_output_desc, output_data + i * group_offset_out)); }; - dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); + workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } } }; @@ -314,6 +317,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { // ------------------- cudnn conv backward data --------------------- ScalingParamType alpha = 1.0f, beta = 0.0f; + auto workspace_handle = dev_ctx.cudnn_workspace_handle(); if (input_grad) { T* input_grad_data = input_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset input_grad. 
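Both cuDNN convolution kernels in this file now acquire the workspace handle once, before the per-group loop, instead of calling dev_ctx.RunCudnnFuncWithWorkspace() for every group. A minimal sketch of the new pattern, assuming dev_ctx and workspace_size_in_bytes are set up as in the hunks above:

// Acquire the reusable cuDNN workspace handle once, outside the loop.
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
for (int i = 0; i < groups; i++) {
  // RunFunc hands the callback a workspace buffer of the requested size.
  auto cudnn_func = [&](void* cudnn_workspace) {
    // ... one cuDNN convolution call per group, using cudnn_workspace ...
  };
  workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}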
@@ -327,7 +331,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { data_algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_input_desc, input_grad_data + i * group_offset_in)); }; - dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); + workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } } // ------------------- cudnn conv backward filter --------------------- @@ -343,7 +347,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { filter_algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_filter_desc, filter_grad_data + i * group_offset_filter)); }; - dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); + workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } } } @@ -359,7 +363,8 @@ REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel); REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel, - paddle::operators::CUDNNConvGradOpKernel); + paddle::operators::CUDNNConvGradOpKernel, + paddle::operators::CUDNNConvGradOpKernel); REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel, diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index 521f423fb022098e6930c333af6b5e54c502cb7e..72cac9bc9fac9d9199e1f45db16e529adef2a676 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -15,6 +15,8 @@ #include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/platform/mkldnn_helper.h" +#include "paddle/fluid/framework/data_layout_transform.h" + namespace paddle { namespace operators { @@ -57,6 +59,11 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler { return conv_pd_->dst_primitive_desc().get_size(); } + mkldnn::memory::format GetDstFormat() const { + return static_cast( + conv_pd_->dst_primitive_desc().desc().data.format); + } + size_t GetDiffWeightsMemorySize() const { return conv_bwd_weights_pd_->diff_weights_primitive_desc().get_size(); } @@ -108,6 +115,20 @@ class ConvMKLDNNHandler : public platform::MKLDNNHandler { "@data-weights_mem_p", pipeline); } + std::shared_ptr AcquireResidualDataMemory( + const mkldnn::memory::desc& md, void* ptr) { + return this->AcquireMemory(md, ptr, "@user_residual_data_mem_p"); + } + + std::shared_ptr AcquireDstMemoryFromResidualDataMemory( + const std::shared_ptr& user_residual_memory_p, + void* dst_ptr, + std::vector& pipeline) { // NOLINT + return this->AcquireMemory(user_residual_memory_p, + this->AcquireDstMemoryFromPrimitive(dst_ptr), + "@residual_data_mem_p", pipeline); + } + std::shared_ptr AcquireDiffSrcMemoryFromDataPrimitive( void* ptr) { return this->AcquireMemoryFromPrimitive( @@ -386,7 +407,13 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { auto user_weights_memory_p = handler.AcquireWeightsMemory( user_weights_md, to_void_cast(filter_data)); - T* output_data = nullptr; + // create reorder primitive if the input format is not the preferred one + auto src_memory_p = + handler.AcquireSrcMemoryFromPrimitive(user_src_memory_p, pipeline); + auto weights_memory_p = handler.AcquireWeightsMemoryFromPrimitive( + user_weights_memory_p, pipeline, is_test); + + std::shared_ptr dst_memory_p; if (fuse_residual_conn) { auto residual_param = ctx.Input("ResidualData"); @@ -399,21 +426,34 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { "Output and elementwise parameter need to have the " "same dimension sizes"); - 
output->ShareDataWith(*residual_param); - output_data = output->mutable_data(ctx.GetPlace()); + if (residual_param->format() != handler.GetDstFormat()) { + auto output_data = + output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); + auto residual_data_tz = + paddle::framework::vectorize2int(residual_param->dims()); + auto residual_data_type = + paddle::framework::ToMKLDNNDataType(residual_param->type()); + + auto user_residual_md = platform::MKLDNNMemDesc( + residual_data_tz, residual_data_type, residual_param->format()); + auto user_residual_memory_p = handler.AcquireResidualDataMemory( + user_residual_md, to_void_cast(residual_param_data)); + + dst_memory_p = handler.AcquireDstMemoryFromResidualDataMemory( + user_residual_memory_p, to_void_cast(output_data), pipeline); + } else { + output->ShareDataWith(*residual_param); + auto output_data = output->mutable_data(ctx.GetPlace()); + dst_memory_p = + handler.AcquireDstMemoryFromPrimitive(to_void_cast(output_data)); + } } else { - output_data = + auto output_data = output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); + dst_memory_p = + handler.AcquireDstMemoryFromPrimitive(to_void_cast(output_data)); } - // create reorder primitive if the input format is not the preferred one - auto src_memory_p = - handler.AcquireSrcMemoryFromPrimitive(user_src_memory_p, pipeline); - auto weights_memory_p = handler.AcquireWeightsMemoryFromPrimitive( - user_weights_memory_p, pipeline, is_test); - auto dst_memory_p = - handler.AcquireDstMemoryFromPrimitive(to_void_cast(output_data)); - // create convolution op primitive std::shared_ptr conv_p; if (bias) { diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc index 73831611d01b8c5b8d2d9f7f15634a0094e4a608..f44094ca6b7b7f23f2e7593ad79e4e2a6f0d3070 100644 --- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc @@ -104,6 +104,7 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { int output_offset = output->numel() / output->dims()[0] / groups; int filter_offset = filter->numel() / groups; T alpha = 1.0f, beta = 0.0f; + auto workspace_handle = dev_ctx.cudnn_workspace_handle(); for (int g = 0; g < groups; g++) { auto cudnn_func = [&](void* cudnn_workspace) { CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( @@ -112,7 +113,7 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { algo, cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_output_desc, output_data + output_offset * g)); }; - dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); + workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } } }; @@ -208,6 +209,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { output_grad->numel() / output_grad->dims()[0] / groups; int filter_offset = filter->numel() / groups; T alpha = 1.0f, beta = 0.0f; + auto workspace_handle = dev_ctx.cudnn_workspace_handle(); if (input_grad) { T* input_grad_data = input_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset input_grad. 
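The conv_mkldnn_op.cc change above picks one of two paths for the fused residual input. Condensed from that hunk (same names, shown here only to highlight the branch):

// Reorder the residual tensor into the convolution's destination memory
// only when its MKL-DNN format differs from the destination format;
// otherwise reuse its buffer directly and skip the copy.
if (residual_param->format() != handler.GetDstFormat()) {
  auto user_residual_memory_p = handler.AcquireResidualDataMemory(
      user_residual_md, to_void_cast<T>(residual_param_data));
  dst_memory_p = handler.AcquireDstMemoryFromResidualDataMemory(
      user_residual_memory_p, to_void_cast<T>(output_data), pipeline);
} else {
  output->ShareDataWith(*residual_param);
  dst_memory_p =
      handler.AcquireDstMemoryFromPrimitive(to_void_cast<T>(output_data));
}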
@@ -220,7 +222,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_input_desc, input_grad_data + input_offset * g)); }; - dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); + workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } } @@ -238,7 +240,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_filter_desc, filter_grad_data + filter_offset * g)); }; - dev_ctx.RunCudnnFuncWithWorkspace(cudnn_func, workspace_size_in_bytes); + workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes); } } } diff --git a/paddle/fluid/operators/cross_entropy_op.cu b/paddle/fluid/operators/cross_entropy_op.cu index 30dbd5bd3d39dd2992c3dd91364003bb7715a2eb..fcd34383a85f6984a8f27ce0625364f8fd5e31d6 100644 --- a/paddle/fluid/operators/cross_entropy_op.cu +++ b/paddle/fluid/operators/cross_entropy_op.cu @@ -13,12 +13,17 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/cross_entropy_op.h" +#include "paddle/fluid/platform/float16.h" +namespace plat = paddle::platform; namespace ops = paddle::operators; using CUDACtx = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(cross_entropy, ops::CrossEntropyOpKernel, - ops::CrossEntropyOpKernel); -REGISTER_OP_CUDA_KERNEL(cross_entropy_grad, - ops::CrossEntropyGradientOpKernel, - ops::CrossEntropyGradientOpKernel); + ops::CrossEntropyOpKernel, + ops::CrossEntropyOpKernel); + +REGISTER_OP_CUDA_KERNEL( + cross_entropy_grad, ops::CrossEntropyGradientOpKernel, + ops::CrossEntropyGradientOpKernel, + ops::CrossEntropyGradientOpKernel); diff --git a/paddle/fluid/operators/delete_var_op.cc b/paddle/fluid/operators/delete_var_op.cc index d7a9bfbc437dbf4c723b9c87ff62ec6b62c38638..89416f7ab5d07ddac5b540b9bb361f831c1ef360 100644 --- a/paddle/fluid/operators/delete_var_op.cc +++ b/paddle/fluid/operators/delete_var_op.cc @@ -32,6 +32,11 @@ class DeleteVarOp : public framework::OperatorBase { } }; +class DeleteVarOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override {} +}; + class DeleteVarOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { @@ -48,4 +53,5 @@ It should not be configured by users directly. 
REGISTER_OPERATOR(delete_var, paddle::operators::DeleteVarOp, paddle::framework::EmptyGradOpMaker, - paddle::operators::DeleteVarOpInfoMaker); + paddle::operators::DeleteVarOpInfoMaker, + paddle::operators::DeleteVarOpShapeInference); diff --git a/paddle/fluid/operators/distributed/grpc_client.cc b/paddle/fluid/operators/distributed/grpc_client.cc index f5d5627815c7320dad5051b0f7d95b8ec6703687..be5c20ad2e4b53e0ff98561b92543b03298381d9 100644 --- a/paddle/fluid/operators/distributed/grpc_client.cc +++ b/paddle/fluid/operators/distributed/grpc_client.cc @@ -79,7 +79,7 @@ VarHandlePtr GRPCClient::AsyncSendVar(const std::string& ep, auto* var = p_scope->FindVar(var_name_val); ::grpc::ByteBuffer req; - SerializeToByteBuffer(var_name_val, var, *p_ctx, &req); + SerializeToByteBuffer(var_name_val, var, *p_ctx, &req, "", trainer_id_); VLOG(3) << s->GetVarHandlePtr()->String() << " begin"; @@ -105,7 +105,10 @@ VarHandlePtr GRPCClient::AsyncSendVar(const std::string& ep, void ProcGetResponse(const VarHandle& var_h, const ::grpc::ByteBuffer& ret_msg) { framework::Variable* outvar = nullptr; + // the trainer_id in a Get response is not used + int trainer_id; - DeserializeFromByteBuffer(ret_msg, *var_h.ctx(), var_h.scope(), &outvar); + DeserializeFromByteBuffer(ret_msg, *var_h.ctx(), var_h.scope(), &outvar, + &trainer_id); } template @@ -135,6 +138,7 @@ VarHandlePtr GRPCClient::AsyncGetVar(const std::string& ep, // prepare input sendrecv::VariableMessage req; req.set_varname(var_name_val); + req.set_trainer_id(trainer_id_); ::grpc::ByteBuffer buf; RequestToByteBuffer(req, &buf); diff --git a/paddle/fluid/operators/distributed/grpc_serde.cc b/paddle/fluid/operators/distributed/grpc_serde.cc index bac098b892658beece85271765eb31eeb3eeda17..b201c4a5763148165f517c719227d6317ecbe350 100644 --- a/paddle/fluid/operators/distributed/grpc_serde.cc +++ b/paddle/fluid/operators/distributed/grpc_serde.cc @@ -34,8 +34,8 @@ namespace distributed { void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, - ::grpc::ByteBuffer* msg, - const std::string& out_name) { + ::grpc::ByteBuffer* msg, const std::string& out_name, + const int trainer_id) { platform::RecordRPCEvent record_event("serial", &ctx); // Default DestroyCallback does nothing. When using GPU, // the CPU buffer needs to be freed. @@ -45,6 +45,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, size_t payload_size; request.set_varname(name); + request.set_trainer_id(trainer_id); // Note: normally the profiler is enabled in 1 trainer, hence only // 1 trainer returns true for ShouldSendProfileState().
It tells PS // servers the trainer's profiling state so that PS can follow the @@ -147,11 +148,12 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, const framework::Scope* scope, - framework::Variable** var) { + framework::Variable** var, int* trainer_id) { platform::RecordRPCEvent record_event("deserial", &ctx); operators::distributed::GRPCVariableResponse resp(scope, &ctx); PADDLE_ENFORCE(resp.Parse(msg) == 0, "parse bytebuffer to tensor error!"); *var = resp.GetVar(); + *trainer_id = resp.GetTrainerId(); } } // namespace distributed diff --git a/paddle/fluid/operators/distributed/grpc_serde.h b/paddle/fluid/operators/distributed/grpc_serde.h index 450c41dcd6b1bf9a33d3bbef3a1c94a2f83ff322..7ec489e961630747ba00e68ad3603cacbb1aa485 100644 --- a/paddle/fluid/operators/distributed/grpc_serde.h +++ b/paddle/fluid/operators/distributed/grpc_serde.h @@ -38,12 +38,13 @@ typedef void (*DestroyCallback)(void*); void SerializeToByteBuffer(const std::string& name, framework::Variable* var, const platform::DeviceContext& ctx, ::grpc::ByteBuffer* msg, - const std::string& out_varname = std::string()); + const std::string& out_varname = std::string(), + const int trainer_id = 0); void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, const framework::Scope* scope, - framework::Variable** var); + framework::Variable** var, int* trainer_id); } // namespace distributed } // namespace operators diff --git a/paddle/fluid/operators/distributed/grpc_server.cc b/paddle/fluid/operators/distributed/grpc_server.cc index 8edb00276df3ade1b320fbf2873e8b54ff3e1464..eb9e36029c01fb38ca4438578190dd7895182ea1 100644 --- a/paddle/fluid/operators/distributed/grpc_server.cc +++ b/paddle/fluid/operators/distributed/grpc_server.cc @@ -102,9 +102,10 @@ class RequestSend final : public RequestBase { auto scope = request_->GetMutableLocalScope(); auto invar = request_->GetVar(); + int trainer_id = request_->GetTrainerId(); framework::Variable* outvar = nullptr; - request_handler_->Handle(varname, scope, invar, &outvar); + request_handler_->Handle(varname, scope, invar, &outvar, trainer_id); Finish(reply_, &responder_); } @@ -133,13 +134,14 @@ class RequestGet final : public RequestBase { void Process() override { // proc request. std::string varname = request_.varname(); + int trainer_id = request_.trainer_id(); VLOG(4) << "RequestGet " << varname; auto scope = request_handler_->scope(); auto invar = scope->FindVar(varname); framework::Variable* outvar = nullptr; - request_handler_->Handle(varname, scope, invar, &outvar); + request_handler_->Handle(varname, scope, invar, &outvar, trainer_id); if (outvar) { SerializeToByteBuffer(varname, outvar, *request_handler_->dev_ctx(), @@ -179,6 +181,7 @@ class RequestPrefetch final : public RequestBase { // prefetch process... std::string in_var_name = request_->Varname(); std::string out_var_name = request_->OutVarname(); + int trainer_id = request_->GetTrainerId(); VLOG(4) << "RequestPrefetch, in_var_name: " << in_var_name << " out_var_name: " << out_var_name; @@ -187,7 +190,8 @@ class RequestPrefetch final : public RequestBase { // out var must be created in local scope! 
framework::Variable* outvar = scope->Var(out_var_name); - request_handler_->Handle(in_var_name, scope, invar, &outvar, out_var_name); + request_handler_->Handle(in_var_name, scope, invar, &outvar, trainer_id, + out_var_name); SerializeToByteBuffer(out_var_name, outvar, *request_handler_->dev_ctx(), &reply_); @@ -225,12 +229,13 @@ class RequestCheckpointNotify final : public RequestBase { std::string checkpoint_notify = request_->Varname(); std::string checkpoint_dir = request_->OutVarname(); + int trainer_id = request_->GetTrainerId(); VLOG(4) << "RequestCheckpointNotify notify: " << checkpoint_notify << ", dir: " << checkpoint_dir; request_handler_->Handle(checkpoint_notify, scope, nullptr, nullptr, - checkpoint_dir); + trainer_id, checkpoint_dir); Finish(reply_, &responder_); } diff --git a/paddle/fluid/operators/distributed/grpc_variable_response.cc b/paddle/fluid/operators/distributed/grpc_variable_response.cc index 34d47f3ec0f3025109447b66078b724607d2953a..d6d219d4369ba785e5c369538d4a18dc682952c1 100644 --- a/paddle/fluid/operators/distributed/grpc_variable_response.cc +++ b/paddle/fluid/operators/distributed/grpc_variable_response.cc @@ -286,13 +286,21 @@ int GRPCVariableResponse::Parse(Source* source) { platform::EnableProfiler(platform::ProfilerState::kCPU); } else if (profiling == platform::kDisableProfiler && platform::IsProfileEnabled()) { - // TODO(panyx0718): Should we allow to customize file dir. platform::DisableProfiler( platform::EventSortingKey::kDefault, - string::Sprintf("/tmp/profile_ps_%lld", listener_id)); + string::Sprintf("%s_%lld", FLAGS_rpc_server_profile_path, + listener_id)); } break; } + case sendrecv::VariableMessage::kTrainerIdFieldNumber: { + uint64_t trainer_id = 0; + if (!input.ReadVarint64(&trainer_id)) { + return tag; + } + meta_.set_trainer_id(trainer_id); + break; + } default: { // Unknown tag, return unknown error. 
return -1; diff --git a/paddle/fluid/operators/distributed/request_handler.h b/paddle/fluid/operators/distributed/request_handler.h index 5be7095acd3c5ac6f880a8a26c246f60a93643b5..3c1db147098055e9974c9dc607266cdaf2e43dae 100644 --- a/paddle/fluid/operators/distributed/request_handler.h +++ b/paddle/fluid/operators/distributed/request_handler.h @@ -190,6 +190,7 @@ class RequestHandler { // } virtual bool Handle(const std::string& varname, framework::Scope* scope, framework::Variable* var, framework::Variable** outvar, + const int trainer_id, const std::string& out_var_name = "") = 0; protected: diff --git a/paddle/fluid/operators/distributed/request_handler_impl.cc b/paddle/fluid/operators/distributed/request_handler_impl.cc index 849e412504eb9180b746db65fd4fa353ed0c05a1..025528fe70b8f4d353ab92f29b1bd71c77cf7850 100644 --- a/paddle/fluid/operators/distributed/request_handler_impl.cc +++ b/paddle/fluid/operators/distributed/request_handler_impl.cc @@ -36,6 +36,7 @@ bool RequestSendHandler::Handle(const std::string& varname, framework::Scope* scope, framework::Variable* invar, framework::Variable** outvar, + const int trainer_id, const std::string& out_var_name) { VLOG(4) << "RequestSendHandler:" << varname; @@ -50,7 +51,6 @@ bool RequestSendHandler::Handle(const std::string& varname, // Async if (!sync_mode_) { VLOG(3) << "async process var: " << varname; - rpc_server_->Profiler().OneStep(); try { executor_->RunPreparedContext((*grad_to_prepared_ctx_)[varname].get(), scope); @@ -76,6 +76,7 @@ bool RequestGetHandler::Handle(const std::string& varname, framework::Scope* scope, framework::Variable* invar, framework::Variable** outvar, + const int trainer_id, const std::string& out_var_name) { VLOG(4) << "RequestGetHandler:" << varname; if (sync_mode_) { @@ -88,6 +89,19 @@ bool RequestGetHandler::Handle(const std::string& varname, } } else { if (varname != FETCH_BARRIER_MESSAGE && varname != COMPLETE_MESSAGE) { + if (enable_dc_asgd_) { + // NOTE: the format is determined by distributed_transpiler.py + std::string param_bak_name = + string::Sprintf("%s.trainer_%d_bak", varname, trainer_id); + VLOG(3) << "getting " << param_bak_name << " trainer_id " << trainer_id; + auto var = scope_->FindVar(varname); + auto t_orig = var->Get(); + auto param_bak = scope_->Var(param_bak_name); + auto t = param_bak->GetMutable(); + t->mutable_data(dev_ctx_->GetPlace(), t_orig.type()); + VLOG(3) << "copying " << varname << " to " << param_bak_name; + framework::TensorCopy(t_orig, dev_ctx_->GetPlace(), t); + } *outvar = scope_->FindVar(varname); } } @@ -98,6 +112,7 @@ bool RequestPrefetchHandler::Handle(const std::string& varname, framework::Scope* scope, framework::Variable* invar, framework::Variable** outvar, + const int trainer_id, const std::string& out_var_name) { VLOG(4) << "RequestPrefetchHandler " << varname; @@ -113,6 +128,7 @@ bool RequestCheckpointHandler::Handle(const std::string& varname, framework::Scope* scope, framework::Variable* invar, framework::Variable** outvar, + const int trainer_id, const std::string& out_var_name) { PADDLE_ENFORCE( checkpoint_notify_id != -1, diff --git a/paddle/fluid/operators/distributed/request_handler_impl.h b/paddle/fluid/operators/distributed/request_handler_impl.h index 8be5b21bb89a580f4091de19186fd2d7e5802478..c1afda9dd2445e492d8b93659c9ff13e6e1030b8 100644 --- a/paddle/fluid/operators/distributed/request_handler_impl.h +++ b/paddle/fluid/operators/distributed/request_handler_impl.h @@ -36,20 +36,34 @@ namespace distributed { class RequestSendHandler final : public 
RequestHandler { public: - explicit RequestSendHandler(bool sync_mode) : RequestHandler(sync_mode) {} + explicit RequestSendHandler(bool sync_mode, bool enable_dc_asgd = false) + : RequestHandler(sync_mode) { + enable_dc_asgd_ = enable_dc_asgd; + } virtual ~RequestSendHandler() {} bool Handle(const std::string& varname, framework::Scope* scope, framework::Variable* var, framework::Variable** outvar, + const int trainer_id, const std::string& out_var_name = "") override; + + private: + bool enable_dc_asgd_; }; class RequestGetHandler final : public RequestHandler { public: - explicit RequestGetHandler(bool sync_mode) : RequestHandler(sync_mode) {} + explicit RequestGetHandler(bool sync_mode, bool enable_dc_asgd = false) + : RequestHandler(sync_mode) { + enable_dc_asgd_ = enable_dc_asgd; + } virtual ~RequestGetHandler() {} bool Handle(const std::string& varname, framework::Scope* scope, framework::Variable* var, framework::Variable** outvar, + const int trainer_id, const std::string& out_var_name = "") override; + + private: + bool enable_dc_asgd_; }; class RequestPrefetchHandler final : public RequestHandler { @@ -58,6 +72,7 @@ class RequestPrefetchHandler final : public RequestHandler { virtual ~RequestPrefetchHandler() {} bool Handle(const std::string& varname, framework::Scope* scope, framework::Variable* var, framework::Variable** outvar, + const int trainer_id, const std::string& out_var_name = "") override; }; @@ -70,6 +85,7 @@ class RequestCheckpointHandler final : public RequestHandler { virtual ~RequestCheckpointHandler() {} bool Handle(const std::string& varname, framework::Scope* scope, framework::Variable* var, framework::Variable** outvar, + const int trainer_id, const std::string& out_var_name = "") override; private: diff --git a/paddle/fluid/operators/distributed/rpc_client.cc b/paddle/fluid/operators/distributed/rpc_client.cc index b5ec9fe5367beb97b3cc7298102deff1e8ca4ec9..390e9af0f38c920f39e8cd3e1b3d28fd89b239fe 100644 --- a/paddle/fluid/operators/distributed/rpc_client.cc +++ b/paddle/fluid/operators/distributed/rpc_client.cc @@ -24,6 +24,7 @@ namespace distributed { std::once_flag RPCClient::init_flag_; std::unique_ptr RPCClient::rpc_client_(nullptr); +int RPCClient::trainer_id_ = 0; } // namespace distributed } // namespace operators diff --git a/paddle/fluid/operators/distributed/rpc_client.h b/paddle/fluid/operators/distributed/rpc_client.h index 3539ee5e459d6dfe0b6510806464bcc6817910bb..1983802e49506c79041112ac87d429e4c084ddfd 100644 --- a/paddle/fluid/operators/distributed/rpc_client.h +++ b/paddle/fluid/operators/distributed/rpc_client.h @@ -72,14 +72,15 @@ class RPCClient { virtual bool Wait() = 0; template - static RPCClient* GetInstance() { - std::call_once(init_flag_, &RPCClient::Init); + static RPCClient* GetInstance(int trainer_id) { + std::call_once(init_flag_, &RPCClient::Init, trainer_id); return rpc_client_.get(); } // Init is called by GetInstance. 
template - static void Init() { + static void Init(int trainer_id) { + trainer_id_ = trainer_id; if (rpc_client_.get() == nullptr) { rpc_client_.reset(new T()); rpc_client_->InitImpl(); @@ -88,6 +89,8 @@ class RPCClient { protected: virtual void InitImpl() {} + // each trainer has exactly one trainer id, so it should be static + static int trainer_id_; private: static std::once_flag init_flag_; diff --git a/paddle/fluid/operators/distributed/rpc_server.cc b/paddle/fluid/operators/distributed/rpc_server.cc index 084480ae48b8b9267ade1a840f6a70519cb28e48..3e30ed4ac86bd2cb3f7c4301163e54a947c3d5b4 100644 --- a/paddle/fluid/operators/distributed/rpc_server.cc +++ b/paddle/fluid/operators/distributed/rpc_server.cc @@ -20,42 +20,10 @@ #include "paddle/fluid/operators/distributed/rpc_server.h" #include "paddle/fluid/platform/profiler.h" -DEFINE_int32(rpc_server_profile_period, 0, - "the period of listen_and_serv to do profile"); -DEFINE_string(rpc_server_profile_path, "/dev/null", - "the profile log file path"); - namespace paddle { namespace operators { namespace distributed { -RPCServerProfiler::RPCServerProfiler(int profile_period, - const std::string& profile_log_path) - : profile_period_(profile_period), profile_log_path_(profile_log_path) { - step_ = 0; -} - -void RPCServerProfiler::OneStep() { - PADDLE_ENFORCE_LE(step_, profile_period_, - "step_ should not be larger then " - "profile_period_"); - if (profile_period_ <= 0) { - return; - } - - if (step_ == 0) { - auto pf_state = paddle::platform::ProfilerState::kCPU; - paddle::platform::EnableProfiler(pf_state); - } - if (step_ == profile_period_) { - paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kTotal, - profile_log_path_); - step_ = 0; - } else { - step_++; - } -} - void RPCServer::ShutDown() { LOG(INFO) << "RPCServer ShutDown "; ShutDownImpl(); diff --git a/paddle/fluid/operators/distributed/rpc_server.h b/paddle/fluid/operators/distributed/rpc_server.h index f3e61e1575ced0b9ffbad23e6973121daca9751b..c78c5007a7f262f15305b6c284e8c4fbddef42a0 100644 --- a/paddle/fluid/operators/distributed/rpc_server.h +++ b/paddle/fluid/operators/distributed/rpc_server.h @@ -23,30 +23,14 @@ #include "paddle/fluid/operators/distributed/request_handler.h" -DECLARE_int32(rpc_server_profile_period); -DECLARE_string(rpc_server_profile_path); - namespace paddle { namespace operators { namespace distributed { -class RPCServerProfiler { - public: - RPCServerProfiler(int profile_period, const std::string& profile_log_path); - void OneStep(); - - private: - const int profile_period_; - std::string profile_log_path_; - int step_; -}; - class RPCServer { public: explicit RPCServer(const std::string& address, int client_num) : cur_cond_(0), - profiler_(FLAGS_rpc_server_profile_period, - FLAGS_rpc_server_profile_path), bind_address_(address), exit_flag_(false), selected_port_(0), @@ -86,7 +70,6 @@ class RPCServer { void Complete(); void ResetBarrierCounter(); - RPCServerProfiler& Profiler() { return profiler_; } bool NeedResetAllVars(); @@ -101,7 +84,6 @@ class RPCServer { std::unordered_map rpc_cond_map_; std::atomic cur_cond_; std::condition_variable rpc_cond_; - RPCServerProfiler profiler_; protected: std::string bind_address_; diff --git a/paddle/fluid/operators/distributed/rpc_server_test.cc b/paddle/fluid/operators/distributed/rpc_server_test.cc index d6176e1443d2a441af7878e5efe99796d486bb7a..c3dd459fc4e8c4bd304c09d7a3ed4f456c4dc69f 100644 --- a/paddle/fluid/operators/distributed/rpc_server_test.cc +++
b/paddle/fluid/operators/distributed/rpc_server_test.cc @@ -125,7 +125,7 @@ TEST(PREFETCH, CPU) { g_req_handler.reset(new distributed::RequestPrefetchHandler(true)); g_rpc_service.reset(new RPCSERVER_T("127.0.0.1:0", 1)); distributed::RPCClient* client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance(0); std::thread server_thread(StartServer, distributed::kRequestPrefetch); g_rpc_service->WaitServerReady(); @@ -165,7 +165,7 @@ TEST(COMPLETE, CPU) { g_req_handler.reset(new distributed::RequestSendHandler(true)); g_rpc_service.reset(new RPCSERVER_T("127.0.0.1:0", 2)); distributed::RPCClient* client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance(0); PADDLE_ENFORCE(client != nullptr); std::thread server_thread(StartServer, distributed::kRequestSend); g_rpc_service->WaitServerReady(); diff --git a/paddle/fluid/operators/distributed/send_recv.proto.in b/paddle/fluid/operators/distributed/send_recv.proto.in index 8b0a09abe1d05dda10eda0030eb91cb9ca40683e..55820c980e8139625c1b589f9d2d68dfee74a212 100644 --- a/paddle/fluid/operators/distributed/send_recv.proto.in +++ b/paddle/fluid/operators/distributed/send_recv.proto.in @@ -79,6 +79,7 @@ message VariableMessage { // server stops profiling and generates a profile to /tmp/profile_ps_* // when profile switches from 1 to 2. int64 profile = 11; + int64 trainer_id = 12; } message VoidMessage {} diff --git a/paddle/fluid/operators/distributed/variable_response.cc b/paddle/fluid/operators/distributed/variable_response.cc index c4854d50b6371064003a10e18efc9e5f160d9a42..b2f73b67dc9bf944892187abd2e5709e54449d7d 100644 --- a/paddle/fluid/operators/distributed/variable_response.cc +++ b/paddle/fluid/operators/distributed/variable_response.cc @@ -16,6 +16,9 @@ #include #include "paddle/fluid/operators/distributed/sendrecvop_utils.h" +DEFINE_string(rpc_server_profile_path, "./profile_ps", + "the profile log file path"); + namespace paddle { namespace operators { namespace distributed { diff --git a/paddle/fluid/operators/distributed/variable_response.h b/paddle/fluid/operators/distributed/variable_response.h index 6aec52ca00f59a42ecca01da8df1680ce4eda432..4c7fcbbdfb305ce6b4fc9d1edd9738899b200ec6 100644 --- a/paddle/fluid/operators/distributed/variable_response.h +++ b/paddle/fluid/operators/distributed/variable_response.h @@ -27,6 +27,8 @@ #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/operators/distributed/send_recv.pb.h" +DECLARE_string(rpc_server_profile_path); + namespace paddle { namespace operators { namespace distributed { @@ -92,6 +94,8 @@ class VariableResponse { return scope_->FindVar(meta_.varname()); } + int GetTrainerId() { return static_cast(meta_.trainer_id()); } + protected: bool ReadRaw(::google::protobuf::io::CodedInputStream* input, const platform::DeviceContext& dev_ctx, platform::Place place, diff --git a/paddle/fluid/operators/elementwise_add_op.cu b/paddle/fluid/operators/elementwise_add_op.cu index dfff518f170b56d180b6883c363effb8dbd677b6..f9f5c66d34fa1d73db00173e493f9953b8579518 100644 --- a/paddle/fluid/operators/elementwise_add_op.cu +++ b/paddle/fluid/operators/elementwise_add_op.cu @@ -30,4 +30,5 @@ REGISTER_OP_CUDA_KERNEL( ops::ElementwiseAddGradKernel, ops::ElementwiseAddGradKernel, ops::ElementwiseAddGradKernel, - ops::ElementwiseAddGradKernel); + ops::ElementwiseAddGradKernel, + ops::ElementwiseAddGradKernel); diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h index 
a6933a16df320b5052150f57ce352d32e4d40bdb..d7444bcfe0b876b17a52a38628cbc526af50cb31 100644 --- a/paddle/fluid/operators/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise_op_function.h @@ -391,7 +391,7 @@ static __global__ void ElemwiseGradBroadcast1CUDAKernel( int j = blockIdx.x; int i = threadIdx.x; int tid = threadIdx.x; - T val = 0; + T val(0); do { int x_offset = i * w + j; @@ -459,7 +459,7 @@ static __global__ void ElemwiseGradBroadcast2CUDAKernel( int tid = threadIdx.x; int j = blockIdx.x; - T val = 0; + T val(0); int ttid = tid; while (true) { diff --git a/paddle/fluid/operators/fetch_barrier_op.cc b/paddle/fluid/operators/fetch_barrier_op.cc index 9d7ac7ab6194593747548fac3cefc8d4ed3058d8..8754856e140ed074782e6fccb8991571a12babab 100644 --- a/paddle/fluid/operators/fetch_barrier_op.cc +++ b/paddle/fluid/operators/fetch_barrier_op.cc @@ -37,7 +37,8 @@ class FetchBarrierOp : public framework::OperatorBase { const platform::Place& place) const override { std::vector eps = Attr>("endpoints"); distributed::RPCClient* rpc_client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance( + Attr("trainer_id")); PADDLE_ENFORCE(rpc_client->Wait(), "internal error in RPCClient"); @@ -61,6 +62,7 @@ This operator will send a send barrier signal to list_and_serv op, so that the Parameter Server would knew all variables have been sent. )DOC"); + AddAttr("trainer_id", "trainer id from 0 ~ worker_num.").SetDefault(0); AddAttr>("endpoints", "(string vector, default 127.0.0.1:6164)" "Server endpoints to send variables to.") diff --git a/paddle/fluid/operators/gen_nccl_id_op.cc b/paddle/fluid/operators/gen_nccl_id_op.cc index 697c239e59d158428ae9ba9f7feded19637dff28..ef574ccdf48dcf6074a777bcb7667b114415674c 100644 --- a/paddle/fluid/operators/gen_nccl_id_op.cc +++ b/paddle/fluid/operators/gen_nccl_id_op.cc @@ -61,7 +61,7 @@ class GenNCCLIdOp : public framework::OperatorBase { std::vector endpoint_list = Attr>("endpoint_list"); distributed::RPCClient* client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance(0); for (auto& ep : endpoint_list) { VLOG(3) << "sending nccl id to " << ep; diff --git a/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc b/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc new file mode 100644 index 0000000000000000000000000000000000000000..7cde7ca462fda9ae6ace7755af0a432afee28bba --- /dev/null +++ b/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc @@ -0,0 +1,132 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/cudnn_helper.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; +using ScopedTensorDescriptor = platform::ScopedTensorDescriptor; +using DataLayout = platform::DataLayout; +using ScopedSpatialTransformerDescriptor = + platform::ScopedSpatialTransformerDescriptor; +template +using CudnnDataType = platform::CudnnDataType; + +template +class CUDNNGridSampleOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace"); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); + auto* input = ctx.Input("X"); + auto* grid = ctx.Input("Grid"); + auto* output = ctx.Output("Output"); + + int n = input->dims()[0]; + int c = input->dims()[1]; + int h = input->dims()[2]; + int w = input->dims()[3]; + const int size[4] = {n, c, h, w}; + + const T* input_data = input->data(); + const T* grid_data = grid->data(); + T* output_data = output->mutable_data({n, c, h, w}, ctx.GetPlace()); + + ScopedSpatialTransformerDescriptor st_desc; + cudnnSpatialTransformerDescriptor_t cudnn_st_desc = + st_desc.descriptor(4, size); + + ScopedTensorDescriptor input_desc; + ScopedTensorDescriptor output_desc; + cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(input->dims())); + cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(output->dims())); + + CUDNN_ENFORCE(platform::dynload::cudnnSpatialTfSamplerForward( + handle, cudnn_st_desc, CudnnDataType::kOne(), cudnn_input_desc, + input_data, grid_data, CudnnDataType::kZero(), cudnn_output_desc, + output_data)); + } +}; + +template +class CUDNNGridSampleGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace"); + auto& dev_ctx = ctx.template device_context(); + auto handle = dev_ctx.cudnn_handle(); + auto* input = ctx.Input("X"); + auto* grid = ctx.Input("Grid"); + auto* output_grad = ctx.Input(framework::GradVarName("Output")); + auto* input_grad = ctx.Output(framework::GradVarName("X")); + auto* grid_grad = ctx.Output(framework::GradVarName("Grid")); + + auto output_grad_dims = output_grad->dims(); + const int n = output_grad_dims[0]; + const int c = output_grad_dims[1]; + const int h = output_grad_dims[2]; + const int w = output_grad_dims[3]; + const int size[4] = {n, c, h, w}; + + ScopedSpatialTransformerDescriptor st_dest; + cudnnSpatialTransformerDescriptor_t cudnn_st_dest = + st_dest.descriptor(4, size); + + const T* input_data = input->data(); + const T* grid_data = grid->data(); + const T* output_grad_data = output_grad->data(); + T* input_grad_data = + input_grad->mutable_data(output_grad_dims, ctx.GetPlace()); + T* grid_grad_data = + grid_grad->mutable_data({n, h, w, 2}, ctx.GetPlace()); + + ScopedTensorDescriptor input_desc; + ScopedTensorDescriptor input_grad_desc; + ScopedTensorDescriptor output_grad_desc; + cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(input->dims())); + cudnnTensorDescriptor_t cudnn_input_grad_desc = + input_grad_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(input_grad->dims())); + cudnnTensorDescriptor_t 
cudnn_output_grad_desc = + output_grad_desc.descriptor( + DataLayout::kNCHW, framework::vectorize2int(output_grad->dims())); + + CUDNN_ENFORCE(platform::dynload::cudnnSpatialTfSamplerBackward( + handle, cudnn_st_dest, CudnnDataType::kOne(), cudnn_input_desc, + input_data, CudnnDataType::kZero(), cudnn_input_grad_desc, + input_grad_data, CudnnDataType::kOne(), cudnn_output_grad_desc, + output_grad_data, grid_data, CudnnDataType::kZero(), + grid_grad_data)); + } +}; + +} // namespace operators +} // namespace paddle + +namespace plat = paddle::platform; +REGISTER_OP_KERNEL(grid_sampler, CUDNN, plat::CUDAPlace, + paddle::operators::CUDNNGridSampleOpKernel, + paddle::operators::CUDNNGridSampleOpKernel); +REGISTER_OP_KERNEL(grid_sampler_grad, CUDNN, plat::CUDAPlace, + paddle::operators::CUDNNGridSampleGradOpKernel, + paddle::operators::CUDNNGridSampleGradOpKernel); diff --git a/paddle/fluid/operators/grid_sampler_op.cc b/paddle/fluid/operators/grid_sampler_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..e76eb6893b1f7b6a965682368860c02fa32f6330 --- /dev/null +++ b/paddle/fluid/operators/grid_sampler_op.cc @@ -0,0 +1,203 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/grid_sampler_op.h" +#include "paddle/fluid/framework/op_registry.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/cudnn_helper.h" +#endif + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +class GridSampleOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of GridSampleOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Grid"), + "Input(Grid) of GridSampleOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Output"), + "Output(Output) of GridSampleOp should not be null."); + + auto x_dims = ctx->GetInputDim("X"); + auto grid_dims = ctx->GetInputDim("Grid"); + PADDLE_ENFORCE(x_dims.size() == 4, + "Input(X) of GridSampleOp should be 4-D Tensor."); + PADDLE_ENFORCE(grid_dims.size() == 4, + "Input(Grid) of GridSampleOp should be 4-D Tensor."); + PADDLE_ENFORCE(grid_dims[3] == 2, "Input(Grid) dims[3] should be 2."); + PADDLE_ENFORCE_EQ(grid_dims[0], x_dims[0], + "Input(X) and Input(Grid) dims[0] should be equal."); + PADDLE_ENFORCE_EQ( + grid_dims[1], x_dims[2], + "Input(X) dims[2] and Input(Grid) dims[1] should be equal."); + PADDLE_ENFORCE_EQ( + grid_dims[2], x_dims[3], + "Input(X) dims[3] and Input(Grid) dims[2] should be equal."); + + ctx->SetOutputDim("Output", x_dims); + ctx->ShareLoD("X", "Output"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library_ = 
framework::LibraryType::kCUDNN; + } +#endif + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + framework::DataLayout::kAnyLayout, library_); + } +}; + +class GridSampleOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "(Tensor) The input data of GridSampleOp. " + "This is a 4-D tensor with shape of [N, C, H, W]"); + AddInput( + "Grid", + "(Tensor) The input grid of GridSampleOp generated by AffineGridOp, " + "This is a 4-D tensor with shape of [N, H, W, 2], which is the " + "concatenation of x and y coordinates with shape [N, H, W] in the " + "last dimension"); + AddOutput("Output", "(Tensor) Output tensor with shape [N, C, H, W]"); + AddAttr( + "use_cudnn", + "(bool, default true) Only used in the cudnn kernel; cudnn must be " + "installed") + .SetDefault(true); + + AddComment(R"DOC( + This operation samples input X by using bilinear interpolation based on + the flow field grid, which is usually generated by affine_grid. The grid + of shape [N, H, W, 2] is the concatenation of (grid_x, grid_y) coordinates + with shape [N, H, W] each, where grid_x indexes the 4th dimension + (width dimension) of input data x and grid_y indexes the 3rd + dimension (height dimension). The final result is the bilinear + interpolation value of the 4 nearest corner points. + + Step 1: + Get (x, y) grid coordinates and scale them to [0, H-1] and [0, W-1], + respectively. + + grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1) + grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1) + + Step 2: + Index input data X with the grid (x, y) in each [H, W] area, and + bilinearly interpolate the point value from its 4 nearest points. + + wn ------- y_n ------- en + | | | + | d_n | + | | | + x_w --d_w-- grid--d_e-- x_e + | | | + | d_s | + | | | + ws ------- y_s ------- es + + x_w = floor(x) // west side x coord + x_e = x_w + 1 // east side x coord + y_n = floor(y) // north side y coord + y_s = y_n + 1 // south side y coord + + d_w = grid_x - x_w // distance to west side + d_e = x_e - grid_x // distance to east side + d_n = grid_y - y_n // distance to north side + d_s = y_s - grid_y // distance to south side + + wn = X[:, :, y_n, x_w] // north-west point value + en = X[:, :, y_n, x_e] // north-east point value + ws = X[:, :, y_s, x_w] // south-west point value + es = X[:, :, y_s, x_e] // south-east point value + + output = wn * d_e * d_s + en * d_w * d_s + + ws * d_e * d_n + es * d_w * d_n + )DOC"); + } +}; + +class GridSampleOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + auto input_dims = ctx->GetInputDim("X"); + auto grid_dims = ctx->GetInputDim("Grid"); + if (ctx->HasOutput(framework::GradVarName("X"))) { + ctx->SetOutputDim(framework::GradVarName("X"), input_dims); + } + if (ctx->HasOutput(framework::GradVarName("Grid"))) { + ctx->SetOutputDim(framework::GradVarName("Grid"), grid_dims); + } + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kCUDNN; + } +#endif + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + framework::DataLayout::kAnyLayout, library_); + } +}; + +class GridSampleGradMaker : public framework::SingleGradOpDescMaker { + public: + using
framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* op = new framework::OpDesc(); + op->SetType("grid_sampler_grad"); + op->SetInput("X", Input("X")); + op->SetInput("Grid", Input("Grid")); + op->SetInput(framework::GradVarName("Output"), OutputGrad("Output")); + + op->SetAttrMap(Attrs()); + + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetOutput(framework::GradVarName("Grid"), InputGrad("Grid")); + return std::unique_ptr(op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(grid_sampler, ops::GridSampleOp, ops::GridSampleOpMaker, + ops::GridSampleGradMaker); +REGISTER_OPERATOR(grid_sampler_grad, ops::GridSampleOpGrad); + +REGISTER_OP_CPU_KERNEL( + grid_sampler, + ops::GridSampleOpKernel, + ops::GridSampleOpKernel); +REGISTER_OP_CPU_KERNEL( + grid_sampler_grad, + ops::GridSampleGradOpKernel, + ops::GridSampleGradOpKernel); diff --git a/paddle/fluid/operators/grid_sampler_op.h b/paddle/fluid/operators/grid_sampler_op.h new file mode 100644 index 0000000000000000000000000000000000000000..e88da83e4a087662d2e7ef0e3108215bca8ec74f --- /dev/null +++ b/paddle/fluid/operators/grid_sampler_op.h @@ -0,0 +1,328 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/gather.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/hostdevice.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenTensor = framework::EigenTensor; + +using Array3 = Eigen::DSizes; +using Array4 = Eigen::DSizes; + +template +static inline bool isInBound(T x, T y, T x_max, T y_max) { + if (x < 0 || x > x_max || y < 0 || y > y_max) { + return false; + } + return true; +} + +template +static void CalcGridLocations(const platform::CPUDeviceContext& ctx, + const Tensor& grid, Tensor* x_w, Tensor* x_e, + Tensor* y_n, Tensor* y_s, Tensor* d_w, + Tensor* d_e, Tensor* d_n, Tensor* d_s) { + auto& place = *ctx.eigen_device(); + const int n = grid.dims()[0]; + const int h = grid.dims()[1]; + const int w = grid.dims()[2]; + const T x_max = static_cast(w - 1); + const T y_max = static_cast(h - 1); + + // split grid with shape (n, h, w, 2) into (x, y) by the 3rd Dim + Tensor grid_x, grid_y; + T* grid_x_data = grid_x.mutable_data({n, h, w}, ctx.GetPlace()); + T* grid_y_data = grid_y.mutable_data({n, h, w}, ctx.GetPlace()); + const T* grid_data = grid.data(); + for (int i = 0; i < n * h * w; i++) { + grid_x_data[i] = grid_data[2 * i]; + grid_y_data[i] = grid_data[(2 * i) + 1]; + } + + Tensor ones; + ones.mutable_data({n, h, w}, ctx.GetPlace()); + auto ones_t = EigenTensor::From(ones).setConstant(1.0); + Tensor half_xmax; + Tensor half_ymax; + half_xmax.mutable_data({n, h, w}, ctx.GetPlace()); + auto half_xmax_t = EigenTensor::From(half_xmax).setConstant(0.5 * x_max); + half_ymax.mutable_data({n, h, w}, ctx.GetPlace()); + auto half_ymax_t = EigenTensor::From(half_ymax).setConstant(0.5 * y_max); + + // scale grid to [0, h-1/w-1] + auto grid_x_t = EigenTensor::From(grid_x); + auto grid_y_t = EigenTensor::From(grid_y); + grid_x_t.device(place) = (grid_x_t + ones_t) * half_xmax_t; + grid_y_t.device(place) = (grid_y_t + ones_t) * half_ymax_t; + + // calculate coords of 4 corner points + x_w->mutable_data({n, h, w}, ctx.GetPlace()); + x_e->mutable_data({n, h, w}, ctx.GetPlace()); + y_n->mutable_data({n, h, w}, ctx.GetPlace()); + y_s->mutable_data({n, h, w}, ctx.GetPlace()); + auto x_w_t = EigenTensor::From(*x_w); + auto x_e_t = EigenTensor::From(*x_e); + auto y_n_t = EigenTensor::From(*y_n); + auto y_s_t = EigenTensor::From(*y_s); + x_w_t.device(place) = grid_x_t.floor(); + x_e_t.device(place) = x_w_t + ones_t; + y_n_t.device(place) = grid_y_t.floor(); + y_s_t.device(place) = y_n_t + ones_t; + + // calculate distances to 4 sides + d_w->mutable_data({n, h, w}, ctx.GetPlace()); + d_e->mutable_data({n, h, w}, ctx.GetPlace()); + d_n->mutable_data({n, h, w}, ctx.GetPlace()); + d_s->mutable_data({n, h, w}, ctx.GetPlace()); + auto d_w_t = EigenTensor::From(*d_w); + auto d_e_t = EigenTensor::From(*d_e); + auto d_n_t = EigenTensor::From(*d_n); + auto d_s_t = EigenTensor::From(*d_s); + d_w_t.device(place) = grid_x_t - x_w_t; + d_e_t.device(place) = x_e_t - grid_x_t; + d_n_t.device(place) = grid_y_t - y_n_t; + d_s_t.device(place) = y_s_t - grid_y_t; +} + +template +static void GetGridPointValue(const Tensor& input, Tensor* output, + const Tensor& x, const Tensor& y) { + const int n = input.dims()[0]; + const int c = input.dims()[1]; + const int h = input.dims()[2]; + const int w = input.dims()[3]; + auto x_t = EigenTensor::From(x); + auto y_t = EigenTensor::From(y); + auto output_t 
= EigenTensor::From(*output).setConstant((T)0); + auto input_t = EigenTensor::From(input); + + for (int i = 0; i < n; i++) { + for (int k = 0; k < h; k++) { + for (int l = 0; l < w; l++) { + if (isInBound(x_t(i, k, l), y_t(i, k, l), (T)(w - 1), (T)(h - 1))) { + for (int j = 0; j < c; j++) { + output_t(i, j, k, l) = + input_t(i, j, static_cast(round(y_t(i, k, l))), + static_cast(round(x_t(i, k, l)))); + } + } + } + } + } +} + +template +static void GatherOutputGradToInputGrad(const Tensor& output_grad, + Tensor* input_grad, const Tensor& x, + const Tensor& y, const Tensor& d1, + const Tensor& d2) { + const int n = output_grad.dims()[0]; + const int c = output_grad.dims()[1]; + const int h = output_grad.dims()[2]; + const int w = output_grad.dims()[3]; + auto x_t = EigenTensor::From(x); + auto y_t = EigenTensor::From(y); + auto d1_t = EigenTensor::From(d1); + auto d2_t = EigenTensor::From(d2); + auto input_grad_t = EigenTensor::From(*input_grad); + auto output_grad_t = EigenTensor::From(output_grad); + + for (int i = 0; i < n; i++) { + for (int k = 0; k < h; k++) { + for (int l = 0; l < w; l++) { + if (isInBound(x_t(i, k, l), y_t(i, k, l), (T)(w - 1), (T)(h - 1))) { + for (int j = 0; j < c; j++) { + input_grad_t(i, j, static_cast(round(y_t(i, k, l))), + static_cast(round(x_t(i, k, l)))) += + output_grad_t(i, j, k, l) * d1_t(i, k, l) * d2_t(i, k, l); + } + } + } + } + } +} + +template +class GridSampleOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto& place = *ctx.template device_context().eigen_device(); + auto* input = ctx.Input("X"); + auto* grid = ctx.Input("Grid"); + + const int n = input->dims()[0]; + const int c = input->dims()[1]; + const int h = input->dims()[2]; + const int w = input->dims()[3]; + + // calc locations and distances of 4 corner points + Tensor x_w, x_e, y_n, y_s; + Tensor d_w, d_e, d_n, d_s; + CalcGridLocations( + ctx.template device_context(), *grid, &x_w, + &x_e, &y_n, &y_s, &d_w, &d_e, &d_n, &d_s); + + auto* output = ctx.Output("Output"); + output->mutable_data({n, c, h, w}, ctx.GetPlace()); + math::SetConstant()( + ctx.template device_context(), output, + static_cast(0)); + + // calc 4 corner points value + Tensor v_wn, v_en, v_ws, v_es; + v_wn.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_en.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_ws.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_es.mutable_data({n, c, h, w}, ctx.GetPlace()); + GetGridPointValue(*input, &v_wn, x_w, y_n); + GetGridPointValue(*input, &v_en, x_e, y_n); + GetGridPointValue(*input, &v_ws, x_w, y_s); + GetGridPointValue(*input, &v_es, x_e, y_s); + + auto d_w_t = EigenTensor::From(d_w); + auto d_e_t = EigenTensor::From(d_e); + auto d_n_t = EigenTensor::From(d_n); + auto d_s_t = EigenTensor::From(d_s); + auto d_w_scaled_t = + d_w_t.reshape(Array4(n, 1, h, w)).broadcast(Array4(1, c, 1, 1)); + auto d_e_scaled_t = + d_e_t.reshape(Array4(n, 1, h, w)).broadcast(Array4(1, c, 1, 1)); + auto d_n_scaled_t = + d_n_t.reshape(Array4(n, 1, h, w)).broadcast(Array4(1, c, 1, 1)); + auto d_s_scaled_t = + d_s_t.reshape(Array4(n, 1, h, w)).broadcast(Array4(1, c, 1, 1)); + auto v_wn_t = EigenTensor::From(v_wn); + auto v_en_t = EigenTensor::From(v_en); + auto v_ws_t = EigenTensor::From(v_ws); + auto v_es_t = EigenTensor::From(v_es); + auto output_t = EigenTensor::From(*output); + // bilinear interpolation by 4 corner points + output_t.device(place) = v_wn_t * d_e_scaled_t * d_s_scaled_t + + v_en_t * d_w_scaled_t * d_s_scaled_t + +
v_ws_t * d_e_scaled_t * d_n_scaled_t + + v_es_t * d_w_scaled_t * d_n_scaled_t; + } +}; + +template +class GridSampleGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("X"); + auto* grid = ctx.Input("Grid"); + auto* output_grad = ctx.Input(framework::GradVarName("Output")); + + const int n = input->dims()[0]; + const int c = input->dims()[1]; + const int h = input->dims()[2]; + const int w = input->dims()[3]; + + auto* input_grad = ctx.Output(framework::GradVarName("X")); + input_grad->mutable_data({n, c, h, w}, ctx.GetPlace()); + math::SetConstant()( + ctx.template device_context(), input_grad, + static_cast(0)); + auto* grid_grad = ctx.Output(framework::GradVarName("Grid")); + grid_grad->mutable_data({n, h, w, 2}, ctx.GetPlace()); + math::SetConstant()( + ctx.template device_context(), grid_grad, + static_cast(0)); + + Tensor x_w, x_e, y_n, y_s; + Tensor d_w, d_e, d_n, d_s; + CalcGridLocations( + ctx.template device_context(), *grid, &x_w, + &x_e, &y_n, &y_s, &d_w, &d_e, &d_n, &d_s); + + // gather output grad value to input grad by corner point coords and weight + GatherOutputGradToInputGrad(*output_grad, input_grad, x_w, y_n, d_e, + d_s); + GatherOutputGradToInputGrad(*output_grad, input_grad, x_w, y_s, d_e, + d_n); + GatherOutputGradToInputGrad(*output_grad, input_grad, x_e, y_n, d_w, + d_s); + GatherOutputGradToInputGrad(*output_grad, input_grad, x_e, y_s, d_w, + d_n); + + // calc 4 corner points value + Tensor v_wn, v_en, v_ws, v_es; + v_wn.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_en.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_ws.mutable_data({n, c, h, w}, ctx.GetPlace()); + v_es.mutable_data({n, c, h, w}, ctx.GetPlace()); + GetGridPointValue(*input, &v_wn, x_w, y_n); + GetGridPointValue(*input, &v_en, x_e, y_n); + GetGridPointValue(*input, &v_ws, x_w, y_s); + GetGridPointValue(*input, &v_es, x_e, y_s); + auto v_wn_t = EigenTensor::From(v_wn); + auto v_en_t = EigenTensor::From(v_en); + auto v_ws_t = EigenTensor::From(v_ws); + auto v_es_t = EigenTensor::From(v_es); + + auto d_w_t = EigenTensor::From(d_w); + auto d_e_t = EigenTensor::From(d_e); + auto d_n_t = EigenTensor::From(d_n); + auto d_s_t = EigenTensor::From(d_s); + + auto output_grad_t = EigenTensor::From(*output_grad); + + Tensor grid_grad_x, grid_grad_y; + grid_grad_x.mutable_data({n, h, w}, ctx.GetPlace()); + grid_grad_y.mutable_data({n, h, w}, ctx.GetPlace()); + auto grid_grad_x_t = EigenTensor::From(grid_grad_x).setConstant(0.0); + auto grid_grad_y_t = EigenTensor::From(grid_grad_y).setConstant(0.0); + for (int i = 0; i < n; i++) { + for (int j = 0; j < c; j++) { + for (int k = 0; k < h; k++) { + for (int l = 0; l < w; l++) { + grid_grad_x_t(i, k, l) += + ((v_en_t(i, j, k, l) - v_wn_t(i, j, k, l)) * d_s_t(i, k, l) + + (v_es_t(i, j, k, l) - v_ws_t(i, j, k, l)) * d_n_t(i, k, l)) * + output_grad_t(i, j, k, l); + grid_grad_y_t(i, k, l) += + ((v_ws_t(i, j, k, l) - v_wn_t(i, j, k, l)) * d_e_t(i, k, l) + + (v_es_t(i, j, k, l) - v_en_t(i, j, k, l)) * d_w_t(i, k, l)) * + output_grad_t(i, j, k, l); + } + } + } + } + const T x_max = static_cast(w - 1); + const T y_max = static_cast(h - 1); + grid_grad_x_t = grid_grad_x_t * (x_max / (T)2); + grid_grad_y_t = grid_grad_y_t * (y_max / (T)2); + + // gather grid_grad [x, y] in 3rd Dim + T* grid_grad_data = grid_grad->data(); + T* grid_grad_x_data = grid_grad_x.data(); + T* grid_grad_y_data = grid_grad_y.data(); + for (int i = 0; i < n * h * w; i++) { + grid_grad_data[2 * i] = 
grid_grad_x_data[i]; + grid_grad_data[2 * i + 1] = grid_grad_y_data[i]; + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/listen_and_serv_op.cc index a038bad701ba8ede3065af9f352f1f21784a50b7..1d8b1411cddf4fe16d2d00313c519cc173e1504d 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -134,7 +134,6 @@ void ListenAndServOp::RunSyncLoop( rpc_service_->ResetBarrierCounter(); while (true) { - rpc_service_->Profiler().OneStep(); // Get from multiple trainers, we don't care about the order in which // the gradients arrive, just add suffix 0~n and merge the gradient. rpc_service_->SetCond(distributed::kRequestSend); @@ -218,23 +217,26 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, framework::ProgramDesc *program, framework::Scope *recv_scope) const { VLOG(2) << "RunAsyncLoop"; - // grad name to block id - std::unordered_map grad_to_block_id; - std::unordered_map id_to_grad; - auto grad_to_block_id_str = Attr>("grad_to_block_id"); - for (const auto &grad_and_id : grad_to_block_id_str) { + DoubleFindMap grad_to_block_id; + + auto append_block_maps = [](DoubleFindMap *out_map, + const std::string &grad_and_id) { std::vector pieces; split(grad_and_id, ':', &pieces); - VLOG(3) << "after split, grad = " << pieces[0] << ", id=" << pieces[1]; + VLOG(3) << "after split, key = " << pieces[0] << ", id=" << pieces[1]; PADDLE_ENFORCE_EQ(pieces.size(), 2); - PADDLE_ENFORCE_EQ(grad_to_block_id.count(pieces[0]), 0); + PADDLE_ENFORCE_EQ(out_map->count(pieces[0]), 0); int block_id = std::stoi(pieces[1]); - grad_to_block_id[pieces[0]] = block_id; - id_to_grad[block_id] = pieces[0]; + (*out_map)[pieces[0]] = block_id; + }; + + for (const auto &grad_and_id : grad_to_block_id_str) { + append_block_maps(&grad_to_block_id, grad_and_id); } + size_t num_blocks = program->Size(); PADDLE_ENFORCE_GE(num_blocks, 2, "server program should have at least 2 blocks"); @@ -244,15 +246,22 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, block_list.push_back(blkid); } auto optimize_prepared = executor->Prepare(*program, block_list); - // execute global block if needed - if (block_list[0] == 1 && id_to_grad.count(1) == 0) { + // execute the global block if needed; block id 1 in the program is the + // global block if it is not bound to a grad var for its update.
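// NOTE (editorial sketch, not part of the original patch): the reverse
// lookup below relies on the DoubleFindMap helper declared later in this
// diff (listen_and_serv_op.h). It is a plain unordered_map extended with a
// linear-scan search by mapped value, so find_value() is O(n); that is
// acceptable here because the map holds one entry per optimize block.
// A minimal self-contained illustration, assuming C++11 (the example names
// are hypothetical):
//
//   #include <algorithm>
//   #include <string>
//   #include <unordered_map>
//
//   template <class TKey, class TValue>
//   class DoubleFindMap : public std::unordered_map<TKey, TValue> {
//    public:
//     typename std::unordered_map<TKey, TValue>::iterator find_value(
//         TValue v) {
//       return std::find_if(this->begin(), this->end(),
//                           [&v](const std::pair<const TKey, TValue>& p) {
//                             return p.second == v;
//                           });
//     }
//   };
//
//   // DoubleFindMap<std::string, int> m;
//   // m["w1@GRAD"] = 2;                          // grad var -> block id
//   // bool unbound = m.find_value(1) == m.end(); // block 1 serves no grad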
+ if (block_list[0] == 1 && + grad_to_block_id.find_value(static_cast(1)) == + grad_to_block_id.end()) { executor->RunPreparedContext(optimize_prepared[0].get(), recv_scope); } std::unordered_map> - grad_to_prepared_ctx; + grad_to_prepared_ctx, param_to_prepared_ctx; for (size_t i = 0; i < block_list.size(); ++i) { - grad_to_prepared_ctx[id_to_grad[block_list[i]]] = optimize_prepared[i]; + auto blkid = block_list[i]; + auto it = grad_to_block_id.find_value(blkid); + if (it != grad_to_block_id.end()) { + grad_to_prepared_ctx[it->first] = optimize_prepared[i]; + } } request_send_handler_->SetGradToPreparedCtx(&grad_to_prepared_ctx); @@ -315,6 +324,7 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, framework::Scope &recv_scope = scope.NewScope(); bool sync_mode = Attr("sync_mode"); + bool dc_sgd = Attr("dc_asgd"); auto fan_in = Attr("Fanin"); auto inputs = Inputs("X"); @@ -328,8 +338,10 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, rpc_service_.reset(new RPCSERVER_T(endpoint, fan_in)); - request_send_handler_.reset(new distributed::RequestSendHandler(sync_mode)); - request_get_handler_.reset(new distributed::RequestGetHandler(sync_mode)); + request_send_handler_.reset( + new distributed::RequestSendHandler(sync_mode, dc_sgd)); + request_get_handler_.reset( + new distributed::RequestGetHandler(sync_mode, dc_sgd)); request_prefetch_handler_.reset( new distributed::RequestPrefetchHandler(sync_mode)); request_checkpoint_handler_.reset(new distributed::RequestCheckpointHandler( @@ -443,6 +455,8 @@ class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { "a map from grad name to it's optimize block id") .SetDefault({}); AddAttr("sync_mode", "if works at sync_mode or not").SetDefault(true); + AddAttr("dc_asgd", "set to true will enable DC-ASGD training.") + .SetDefault(false); AddAttr>( kOptimizeBlocks, "Optimize blocks to run on server side.") .SetDefault({}); diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index 5f889793ab16249a4e06801090db087a089dbed1..9431978df836121baacc12ed4e1ee6b218cc7d7a 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -18,6 +18,7 @@ limitations under the License. 
*/ #include #include #include +#include #include #include "paddle/fluid/framework/executor.h" @@ -37,6 +38,17 @@ constexpr char kCheckpointBlockId[] = "checkpint_block_id"; void RunServer(std::shared_ptr service); +template +class DoubleFindMap : public std::unordered_map { + public: + typename std::unordered_map::iterator find_value(TValue v) { + return std::find_if(this->begin(), this->end(), + [&v](const std::pair p) { + return p.second == v; + }); + } +}; + class ListenAndServOp : public framework::OperatorBase { public: ListenAndServOp(const std::string& type, diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index 77802dd102465c97369aafbaa70a0ddddb5486fe..c7c09ec153ca5df9937083e1018265a420d55bcb 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -76,8 +76,12 @@ endif() cc_test(concat_test SRCS concat_test.cc DEPS concat_and_split) cc_test(cpu_vec_test SRCS cpu_vec_test.cc DEPS blas cpu_info) if (NOT WIN32) -cc_library(jit_kernel - SRCS jit_kernel.cc jit_kernel_blas.cc jit_kernel_exp.cc jit_kernel_rnn.cc jit_kernel_crf_decode.cc - DEPS cpu_info cblas) -cc_test(jit_kernel_test SRCS jit_kernel_test.cc DEPS jit_kernel) + set(JIT_KERNEL_SRCS jit_kernel.cc jit_kernel_blas.cc jit_kernel_exp.cc jit_kernel_rnn.cc jit_kernel_crf_decode.cc) + set(JIT_KERNEL_DEPS cpu_info cblas gflags enforce) + if(WITH_XBYAK) + list(APPEND JIT_KERNEL_SRCS jit_gen.cc jit_code.cc) + list(APPEND JIT_KERNEL_DEPS xbyak) + endif() + cc_library(jit_kernel SRCS ${JIT_KERNEL_SRCS} DEPS ${JIT_KERNEL_DEPS}) + cc_test(jit_kernel_test SRCS jit_kernel_test.cc DEPS jit_kernel) endif() diff --git a/paddle/fluid/operators/math/cos_sim_functor.cu b/paddle/fluid/operators/math/cos_sim_functor.cu index 4e6ff5ee0a449b42762748ba1a103876beee01f2..537c7e47155fe9a12196869ceaed84fca198335b 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.cu +++ b/paddle/fluid/operators/math/cos_sim_functor.cu @@ -51,7 +51,7 @@ struct CosSimDyFunctor { T* dy) const { const int block_size = 512; dim3 threads(block_size, 1); - dim3 grid(1, (rows + block_size - 1) / block_size); + dim3 grid((rows + block_size - 1) / block_size, 1); CosSimDyKernel<<>>( x_norm, y_norm, x, y, z, dz, rows, cols, dy); } diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu index c92341ea55ea21773acba33665e267b2f1c25fe3..cb200ec8d6ea533d546f3e01a16a48c88b14f677 100644 --- a/paddle/fluid/operators/math/cross_entropy.cu +++ b/paddle/fluid/operators/math/cross_entropy.cu @@ -21,6 +21,16 @@ namespace operators { namespace math { namespace { + +__device__ __forceinline__ float real_log(float x) { return logf(x); } + +__device__ __forceinline__ double real_log(double x) { return log(x); } + +__device__ __forceinline__ platform::float16 real_log( + const platform::float16& val) { + return static_cast(logf(static_cast(val))); +} + template __global__ void CrossEntropyKernel(T* Y, const T* X, const int64_t* label, const int N, const int D, @@ -29,8 +39,8 @@ __global__ void CrossEntropyKernel(T* Y, const T* X, const int64_t* label, i += blockDim.x * gridDim.x) { PADDLE_ASSERT(label[i] >= 0 && label[i] < D || label[i] == ignore_index); Y[i] = ignore_index == label[i] - ? 0 - : -math::TolerableValue()(log(X[i * D + label[i]])); + ? 
static_cast(0) + : -math::TolerableValue()(real_log(X[i * D + label[i]])); } } @@ -38,12 +48,12 @@ template __global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label, const int class_num) { int tid = threadIdx.x; - T val = 0; + T val(0); int idx = blockIdx.x * class_num + tid; int end = blockIdx.x * class_num + class_num; for (; idx < end; idx += blockDim.x) { - val += math::TolerableValue()(std::log(X[idx])) * label[idx]; + val += math::TolerableValue()(real_log(X[idx])) * label[idx]; } val = paddle::platform::reduceSum(val, tid, blockDim.x); @@ -53,8 +63,6 @@ __global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label, } } // namespace -using Tensor = framework::Tensor; - template class CrossEntropyFunctor { public: @@ -89,6 +97,8 @@ class CrossEntropyFunctor { template class CrossEntropyFunctor; template class CrossEntropyFunctor; +template class CrossEntropyFunctor; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/math/cross_entropy.h b/paddle/fluid/operators/math/cross_entropy.h index e8aeb5d0575ac0f6b8761e97896df73578e8a103..99a4935186e1e6f9e3bf36eb029ce3d230510117 100644 --- a/paddle/fluid/operators/math/cross_entropy.h +++ b/paddle/fluid/operators/math/cross_entropy.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/hostdevice.h" @@ -33,6 +34,26 @@ struct TolerableValue { } }; +// NOTE(dzh): float16 value clipping behaves differently from float/double. +// 1. Our ValueClipping has a hard-coded threshold of 1e20 +// for float numbers, and 1e20 overflows in float16. +// 2. float16 should expose real-number overflow to Python, +// because mixed-precision training depends on inf/nan values to decide +// whether the scale value will be adjusted. +// Also, standard implementations of cross entropy in other +// frameworks do not have this ValueClipping. +template <> +struct TolerableValue { + HOSTDEVICE platform::float16 operator()(const platform::float16& x) const { + if (platform::isfinite(x)) + return x; + else if (x > static_cast(0)) + return std::numeric_limits::max(); + else + return std::numeric_limits::min(); + } +}; + template class CrossEntropyFunctor { public: diff --git a/paddle/fluid/operators/math/fc_compute.h b/paddle/fluid/operators/math/fc_compute.h index 87220d4019fc9337fb8355172ca4f1372cfd4558..b072b4c20a171d148bd892c162436d03da404fb9 100644 --- a/paddle/fluid/operators/math/fc_compute.h +++ b/paddle/fluid/operators/math/fc_compute.h @@ -36,7 +36,7 @@ inline void FCCompute(const BlasT& blas, const int M, .template Get>(N); for (int i = 0; i < M; i++) { T* dst = Y + i * N; - vaddrelu->Compute(B, dst, dst); + vaddrelu->Compute(B, dst, dst, N); } } else { const auto& vadd = jitkernel::KernelPool::Instance() @@ -47,7 +47,7 @@ inline void FCCompute(const BlasT& blas, const int M, #endif for (int i = 0; i < M; i++) { T* dst = Y + i * N; - vadd->Compute(B, dst, dst); + vadd->Compute(B, dst, dst, N); } } } diff --git a/paddle/fluid/operators/math/jit_code.cc b/paddle/fluid/operators/math/jit_code.cc new file mode 100644 index 0000000000000000000000000000000000000000..a92e5d351e71a55bca2845ce275780950d096031 --- /dev/null +++ b/paddle/fluid/operators/math/jit_code.cc @@ -0,0 +1,103 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/math/jit_code.h" +#include "paddle/fluid/operators/math/jit_kernel.h" +#include "paddle/fluid/platform/cpu_info.h" + +namespace paddle { +namespace operators { +namespace math { +namespace jitkernel { +namespace gen { + +using namespace platform::jit; // NOLINT + +bool VVVJitCode::init(int d) { + // It's not necessary to use AVX-512 since it would lower the clock frequency + // and this kernel is not compute bound. + return MayIUse(avx); +} + +void VVVJitCode::generate() { + // no need to push the stack, and no need to save AVX-512 registers if AVX-512 is not used + int offset = 0; + if (with_relu_) { + vxorps(ymm_zero, ymm_zero, ymm_zero); + } + for (int i = 0; i < num_ / AVX_FLOAT_BLOCK; ++i) { + vmovups(ymm_src1, ptr[param1 + offset]); + vmovups(ymm_src2, ptr[param2 + offset]); + if (type_ == operand_type::mul) { + vmulps(ymm_dst, ymm_src1, ymm_src2); + } else if (type_ == operand_type::add) { + vaddps(ymm_dst, ymm_src1, ymm_src2); + } + if (with_relu_) { + vmaxps(ymm_dst, ymm_zero, ymm_dst); + } + vmovups(ptr[param3 + offset], ymm_dst); + offset += sizeof(float) * AVX_FLOAT_BLOCK; + } + int rest = num_ % AVX_FLOAT_BLOCK; + if (rest >= 4) { + vmovups(xmm_src1, ptr[param1 + offset]); + vmovups(xmm_src2, ptr[param2 + offset]); + if (type_ == operand_type::mul) { + vmulps(xmm_dst, xmm_src1, xmm_src2); + } else if (type_ == operand_type::add) { + vaddps(xmm_dst, xmm_src1, xmm_src2); + } + if (with_relu_) { + vmaxps(xmm_dst, xmm_zero, xmm_dst); + } + vmovups(ptr[param3 + offset], xmm_dst); + offset += sizeof(float) * 4; + rest -= 4; + } + if (rest >= 2) { + vmovq(xmm_src1, ptr[param1 + offset]); + vmovq(xmm_src2, ptr[param2 + offset]); + if (type_ == operand_type::mul) { + vmulps(xmm_dst, xmm_src1, xmm_src2); + } else if (type_ == operand_type::add) { + vaddps(xmm_dst, xmm_src1, xmm_src2); + } + if (with_relu_) { + vmaxps(xmm_dst, xmm_zero, xmm_dst); + } + vmovq(ptr[param3 + offset], xmm_dst); + offset += sizeof(float) * 2; + rest -= 2; + } + if (rest > 0) { + vmovss(xmm_src1, ptr[param1 + offset]); + vmovss(xmm_src2, ptr[param2 + offset]); + if (type_ == operand_type::mul) { + vmulss(xmm_dst, xmm_src1, xmm_src2); + } else if (type_ == operand_type::add) { + vaddss(xmm_dst, xmm_src1, xmm_src2); + } + if (with_relu_) { + vmaxps(xmm_dst, xmm_zero, xmm_dst); + } + vmovss(ptr[param3 + offset], xmm_dst); + } + ret(); +} +} // namespace gen +} // namespace jitkernel +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/jit_code.h b/paddle/fluid/operators/math/jit_code.h new file mode 100644 index 0000000000000000000000000000000000000000..73692ebc67c71f6190f2d18bd50071a28a35d4c9 --- /dev/null +++ b/paddle/fluid/operators/math/jit_code.h @@ -0,0 +1,79 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "paddle/fluid/operators/math/jit_gen.h" +namespace paddle { +namespace operators { +namespace math { +namespace jitkernel { +namespace gen { + +using reg64_t = const Xbyak::Reg64; +using reg32_t = const Xbyak::Reg32; +using xmm_t = const Xbyak::Xmm; +using ymm_t = const Xbyak::Ymm; +using zmm_t = const Xbyak::Zmm; +using Label = Xbyak::Label; + +// function: vec = Operand(vec, vec) (maybe with relu) +typedef enum { mul = 0, add } operand_type; + +class VVVJitCode : public JitCode { + public: + const char* name() const override { + std::string base = "VVVJitCode"; + if (type_ == operand_type::mul) { + base += "_Mul"; + } else if (type_ == operand_type::add) { + base += "_Add"; + } + base += (with_relu_ ? "_relu" : ""); + return base.c_str(); + } + explicit VVVJitCode(int d, operand_type type, bool with_relu, + size_t code_size = 256 * 1024, void* code_ptr = nullptr) + : JitCode(code_size, code_ptr), + num_(d), + type_(type), + with_relu_(with_relu) {} + static bool init(int d); + void generate() override; + + private: + int num_; + operand_type type_; + bool with_relu_; + reg64_t param1{abi_param1}; + reg64_t param2{abi_param2}; + reg64_t param3{abi_param3}; + + xmm_t xmm_src1 = xmm_t(0); + xmm_t xmm_src2 = xmm_t(1); + xmm_t xmm_dst = xmm_t(1); + xmm_t xmm_zero = xmm_t(2); + + ymm_t ymm_src1 = ymm_t(0); + ymm_t ymm_src2 = ymm_t(1); + ymm_t ymm_dst = ymm_t(1); + ymm_t ymm_zero = ymm_t(2); +}; + +} // namespace gen +} // namespace jitkernel +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/jit_gen.cc b/paddle/fluid/operators/math/jit_gen.cc new file mode 100644 index 0000000000000000000000000000000000000000..6af39518ed926554c8c839bba701d3827923dba0 --- /dev/null +++ b/paddle/fluid/operators/math/jit_gen.cc @@ -0,0 +1,90 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/math/jit_gen.h" +#include +#include +#include +#include "paddle/fluid/platform/cpu_info.h" + +DEFINE_bool(dump_jitcode, false, "Whether to dump the jitcode to file"); + +namespace paddle { +namespace operators { +namespace math { +namespace jitkernel { +namespace gen { + +constexpr Xbyak::Operand::Code g_abi_regs[] = { + Xbyak::Operand::RBX, Xbyak::Operand::RBP, Xbyak::Operand::R12, + Xbyak::Operand::R13, Xbyak::Operand::R14, Xbyak::Operand::R15}; + +constexpr int num_g_abi_regs = sizeof(g_abi_regs) / sizeof(g_abi_regs[0]); + +void JitCode::preCode() { + for (int i = 0; i < num_g_abi_regs; ++i) { + push(Xbyak::Reg64(g_abi_regs[i])); + } + if (platform::jit::MayIUse(platform::jit::avx512f)) { + mov(reg_EVEX_max_8b_offt, 2 * EVEX_max_8b_offt); + } +} + +void JitCode::postCode() { + for (int i = 0; i < num_g_abi_regs; ++i) { + pop(Xbyak::Reg64(g_abi_regs[num_g_abi_regs - 1 - i])); + } + ret(); +} + +void JitCode::dumpCode(const Xbyak::uint8 *code) const { + if (code) { + static int counter = 0; + std::ostringstream filename; + filename << "paddle_jitcode_" << name() << "." << counter << ".bin"; + counter++; + std::ofstream fout(filename.str(), std::ios::out); + if (fout.is_open()) { + fout.write(reinterpret_cast(code), getSize()); + fout.close(); + } + } +} + +Xbyak::Address JitCode::EVEX_compress_addr(Xbyak::Reg64 base, int offt, + bool bcast) { + int scale = 0; + if (EVEX_max_8b_offt <= offt && offt < 3 * EVEX_max_8b_offt) { + offt = offt - 2 * EVEX_max_8b_offt; + scale = 1; + } else if (3 * EVEX_max_8b_offt <= offt && offt < 5 * EVEX_max_8b_offt) { + offt = offt - 4 * EVEX_max_8b_offt; + scale = 2; + } + auto re = Xbyak::RegExp() + base + offt; + if (scale) { + re = re + reg_EVEX_max_8b_offt * scale; + } + if (bcast) { + return zword_b[re]; + } else { + return zword[re]; + } +} + +} // namespace gen +} // namespace jitkernel +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/jit_gen.h b/paddle/fluid/operators/math/jit_gen.h new file mode 100644 index 0000000000000000000000000000000000000000..6abf3434cc8d8f6ab2838ef822a4f6b948331802 --- /dev/null +++ b/paddle/fluid/operators/math/jit_gen.h @@ -0,0 +1,80 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include "paddle/fluid/platform/macros.h" + +#define XBYAK_USE_MMAP_ALLOCATOR +#include "xbyak/xbyak.h" +#include "xbyak/xbyak_util.h" + +DECLARE_bool(dump_jitcode); + +namespace paddle { +namespace operators { +namespace math { +namespace jitkernel { +namespace gen { + +#define DECLARE_JIT_CODE(codename) \ + const char *name() const override { return #codename; } + +// Application Binary Interface +constexpr Xbyak::Operand::Code abi_param1(Xbyak::Operand::RDI), + abi_param2(Xbyak::Operand::RSI), abi_param3(Xbyak::Operand::RDX), + abi_param4(Xbyak::Operand::RCX), abi_not_param1(Xbyak::Operand::RCX); + +class JitCode : public Xbyak::CodeGenerator { + public: + explicit JitCode(size_t code_size = 256 * 1024, void *code_ptr = nullptr) + : Xbyak::CodeGenerator(code_size, code_ptr) {} + + virtual ~JitCode() {} + virtual const char *name() const = 0; + virtual void generate() = 0; + + template + const FUNC getCode() { + this->generate(); + const Xbyak::uint8 *code = CodeGenerator::getCode(); + if (FLAGS_dump_jitcode) { + this->dumpCode(code); + } + return reinterpret_cast(code); + } + DISABLE_COPY_AND_ASSIGN(JitCode); + + protected: + Xbyak::Reg64 param1{abi_param1}; + const int EVEX_max_8b_offt = 0x200; + const Xbyak::Reg64 reg_EVEX_max_8b_offt = rbp; + + void preCode(); + void postCode(); + void dumpCode(const Xbyak::uint8 *code) const; + void L(const char *label) { Xbyak::CodeGenerator::L(label); } + void L(const Xbyak::Label &label) { Xbyak::CodeGenerator::L(label); } + // Enhanced vector extension + Xbyak::Address EVEX_compress_addr(Xbyak::Reg64 base, int offt, + bool bcast = false); +}; + +} // namespace gen +} // namespace jitkernel +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/jit_kernel.h b/paddle/fluid/operators/math/jit_kernel.h index 48e180b1fd43b06cc13f7a4b00c73aff2eb940ac..04e0b81d3e7c696ac2f5ee78db90fb3c89ab345d 100644 --- a/paddle/fluid/operators/math/jit_kernel.h +++ b/paddle/fluid/operators/math/jit_kernel.h @@ -39,6 +39,7 @@ class Kernel { public: Kernel() = default; virtual ~Kernel() = default; + // TODO(TJ): below members should be deprecated. 
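// NOTE (editorial, not part of the original patch): num_/end_/rest_ fed the
// old per-size template specializations; the reworked kernels in this diff
// take the vector length as the trailing argument of Compute instead, which
// is why these members are slated for removal. A hedged usage sketch of the
// new interface, following the call sites elsewhere in this diff:
//
//   namespace jit = paddle::operators::math::jitkernel;
//   const auto& ker =
//       jit::KernelPool::Instance().Get<jit::VMulKernel<float>>(n);
//   ker->Compute(x, y, z, n);  // Compute is now a plain function pointer,
//                              // bound to JIT, MKL or reference code when
//                              // the kernel object is constructed.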
int num_{0}; int end_{0}; int rest_{0}; @@ -64,32 +65,32 @@ class KernelPool { template class VMulKernel : public Kernel { public: - virtual void Compute(const T *x, const T *y, T *z) const = 0; + void (*Compute)(const T *, const T *, T *, int); }; template class VAddKernel : public Kernel { public: - virtual void Compute(const T *x, const T *y, T *z) const = 0; + void (*Compute)(const T *, const T *, T *, int); }; template -class VScalKernel : public Kernel { +class VAddReluKernel : public Kernel { public: - virtual void Compute(const T a, const T *x, T *y) const = 0; - virtual void Compute(const T a, T *x) const = 0; + void (*Compute)(const T *, const T *, T *, int); }; template -class VAddBiasKernel : public Kernel { +class VScalKernel : public Kernel { public: virtual void Compute(const T a, const T *x, T *y) const = 0; + virtual void Compute(const T a, T *x) const = 0; }; template -class VAddReluKernel : public Kernel { +class VAddBiasKernel : public Kernel { public: - virtual void Compute(const T *x, const T *y, T *z) const = 0; + virtual void Compute(const T a, const T *x, T *y) const = 0; }; template diff --git a/paddle/fluid/operators/math/jit_kernel_blas.cc b/paddle/fluid/operators/math/jit_kernel_blas.cc index c88b17b012d1b9cd59220f6b37cb2ecb8a1551a4..f976953a245e424e6cb26bbf1cff2f120f84c133 100644 --- a/paddle/fluid/operators/math/jit_kernel_blas.cc +++ b/paddle/fluid/operators/math/jit_kernel_blas.cc @@ -15,6 +15,12 @@ limitations under the License. */ #include "paddle/fluid/operators/math/jit_kernel.h" #include #include "paddle/fluid/operators/math/jit_kernel_macro.h" +#include "paddle/fluid/platform/enforce.h" + +#ifdef PADDLE_WITH_XBYAK +#include "paddle/fluid/operators/math/jit_code.h" +#endif + #ifdef PADDLE_WITH_MKLML #include "paddle/fluid/platform/dynload/mklml.h" #endif @@ -27,121 +33,204 @@ namespace paddle { namespace operators { namespace math { namespace jitkernel { - namespace jit = platform::jit; -/* VMUL JitKernel */ -template -class VMulKernelImpl : public VMulKernel { - public: - explicit VMulKernelImpl(int d) : VMulKernel() { this->num_ = d; } - void Compute(const T* x, const T* y, T* z) const override { - for (int i = 0; i < this->num_; ++i) { - z[i] = x[i] * y[i]; - } +template +void VMulRefer(const T* x, const T* y, T* z, int n) { + for (int i = 0; i < n; ++i) { + z[i] = x[i] * y[i]; } -}; +} -#ifdef PADDLE_WITH_MKLML -#define MKL_FLOAT(isa, block) \ - template <> \ - void VMulKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - platform::dynload::vsMul(this->num_, x, y, z); \ +template +void VAddRefer(const T* x, const T* y, T* z, int n) { + for (int i = 0; i < n; ++i) { + z[i] = x[i] + y[i]; } +} -#define MKL_DOUBLE(isa, block) \ - template <> \ - void VMulKernelImpl::Compute( \ - const double* x, const double* y, double* z) const { \ - platform::dynload::vdMul(this->num_, x, y, z); \ +template +void VAddReluRefer(const T* x, const T* y, T* z, int n) { + for (int i = 0; i < n; ++i) { + z[i] = x[i] + y[i]; + z[i] = z[i] > 0 ? 
z[i] : 0; } +} -FOR_EACH_ISA(MKL_FLOAT, kGT16); -FOR_EACH_ISA_BLOCK(MKL_DOUBLE); +#ifdef PADDLE_WITH_MKLML +template +void VMulMKL(const T* x, const T* y, T* z, int n); + +template <> +void VMulMKL(const float* x, const float* y, float* z, int n) { + platform::dynload::vsMul(n, x, y, z); +} + +template <> +void VMulMKL(const double* x, const double* y, double* z, int n) { + platform::dynload::vdMul(n, x, y, z); +} + +template +void VAddMKL(const T* x, const T* y, T* z, int n); + +template <> +void VAddMKL(const float* x, const float* y, float* z, int n) { + platform::dynload::vsAdd(n, x, y, z); +} + +template <> +void VAddMKL(const double* x, const double* y, double* z, int n) { + platform::dynload::vdAdd(n, x, y, z); +} #endif -#define INTRI8_FLOAT(isa) \ - template <> \ - void VMulKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 tmpx, tmpy; \ - tmpx = _mm256_loadu_ps(x); \ - tmpy = _mm256_loadu_ps(y); \ - tmpx = _mm256_mul_ps(tmpx, tmpy); \ - _mm256_storeu_ps(z, tmpx); \ +#define DECLARE_STATIC_FUNC \ + static inline std::string name(int d) { \ + PADDLE_THROW("DType should be either float or double"); \ + } \ + static inline bool useJIT(int d) { return false; } \ + static inline bool useMKL(int d) { return false; } + +/* VMUL JitKernel */ +template +class VMulKernelImpl : public VMulKernel { + public: + DECLARE_STATIC_FUNC; + explicit VMulKernelImpl(int d) : VMulKernel() { +#ifdef PADDLE_WITH_XBYAK + if (useJIT(d)) { + // roughly estimate the size of code + size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + jitcode_.reset(new gen::VVVJitCode(d, gen::operand_type::mul, false, + sz > 4096 ? sz : 4096)); + this->Compute = + jitcode_->getCode(); + return; + } +#endif +#ifdef PADDLE_WITH_MKLML + if (useMKL(d)) { + this->Compute = VMulMKL; + return; + } +#endif + this->Compute = VMulRefer; } -// avx > for > mkl -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx); +#ifdef PADDLE_WITH_XBYAK + + private: + std::unique_ptr jitcode_{nullptr}; #endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2); +}; + +#ifdef PADDLE_WITH_XBYAK +template <> +bool VMulKernelImpl::useJIT(int d) { + return gen::VVVJitCode::init(d); +} #endif -#ifdef __AVX512F__ -INTRI8_FLOAT(jit::avx512f); + +#ifdef PADDLE_WITH_MKLML +template <> +bool VMulKernelImpl::useMKL(int d) { + return jit::MayIUse(jit::avx512f) && d > 512; +} + +template <> +bool VMulKernelImpl::useMKL(int d) { + return true; +} #endif -// TODO(TJ): eq16 test and complete avx512 -#undef INTRI8_FLOAT -#undef MKL_FLOAT -#undef MKL_DOUBLE -/* VADD JitKernel */ -template +/* VAdd JitKernel */ +template class VAddKernelImpl : public VAddKernel { public: - explicit VAddKernelImpl(int d) : VAddKernel() { this->num_ = d; } - void Compute(const T* x, const T* y, T* z) const override { - for (int i = 0; i < this->num_; ++i) { - z[i] = x[i] + y[i]; + DECLARE_STATIC_FUNC; + explicit VAddKernelImpl(int d) : VAddKernel() { +#ifdef PADDLE_WITH_XBYAK + if (useJIT(d)) { + size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + jitcode_.reset(new gen::VVVJitCode(d, gen::operand_type::add, false, + sz > 4096 ? 
sz : 4096)); + this->Compute = + jitcode_->getCode(); + return; + } +#endif +#ifdef PADDLE_WITH_MKLML + if (useMKL(d)) { + this->Compute = VAddMKL; + return; } +#endif + this->Compute = VAddRefer; } +#ifdef PADDLE_WITH_XBYAK + + private: + std::unique_ptr jitcode_{nullptr}; +#endif }; +#ifdef PADDLE_WITH_XBYAK +template <> +bool VAddKernelImpl::useJIT(int d) { + return gen::VVVJitCode::init(d); +} +#endif + #ifdef PADDLE_WITH_MKLML -#define MKL_FLOAT(isa, block) \ - template <> \ - void VAddKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - platform::dynload::vsAdd(this->num_, x, y, z); \ - } +template <> +bool VAddKernelImpl::useMKL(int d) { + return d > 512; +} + +template <> +bool VAddKernelImpl::useMKL(int d) { + return true; +} +#endif -#define MKL_DOUBLE(isa, block) \ - template <> \ - void VAddKernelImpl::Compute( \ - const double* x, const double* y, double* z) const { \ - platform::dynload::vdAdd(this->num_, x, y, z); \ +/* VAddRelu JitKernel */ +template +class VAddReluKernelImpl : public VAddReluKernel { + public: + DECLARE_STATIC_FUNC; + explicit VAddReluKernelImpl(int d) : VAddReluKernel() { +#ifdef PADDLE_WITH_XBYAK + if (useJIT(d)) { + size_t sz = 96 + d / AVX_FLOAT_BLOCK * 4 * 8; + jitcode_.reset(new gen::VVVJitCode(d, gen::operand_type::add, true, + sz > 4096 ? sz : 4096)); + this->Compute = + jitcode_->getCode(); + return; + } +#endif + this->Compute = VAddReluRefer; } +#ifdef PADDLE_WITH_XBYAK -FOR_EACH_ISA(MKL_FLOAT, kGT16); -FOR_EACH_ISA_BLOCK(MKL_DOUBLE); + private: + std::unique_ptr jitcode_{nullptr}; #endif +}; -#define INTRI8_FLOAT(isa) \ - template <> \ - void VAddKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 tmpx, tmpy; \ - tmpx = _mm256_loadu_ps(x); \ - tmpy = _mm256_loadu_ps(y); \ - tmpx = _mm256_add_ps(tmpx, tmpy); \ - _mm256_storeu_ps(z, tmpx); \ - } -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx); -#endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2); +#ifdef PADDLE_WITH_XBYAK +template <> +bool VAddReluKernelImpl::useJIT(int d) { + return gen::VVVJitCode::init(d); +} #endif -#ifdef __AVX512F__ -INTRI8_FLOAT(jit::avx512f); -#endif -// TODO(TJ): eq16 test and complete avx512 -#undef INTRI8_FLOAT -#undef MKL_FLOAT -#undef MKL_DOUBLE +#undef DECLARE_STATIC_FUNC + +REGISTER_JITKERNEL(vmul, VMulKernel); +REGISTER_JITKERNEL(vadd, VAddKernel); +REGISTER_JITKERNEL(vaddrelu, VAddReluKernel); /* VSCAL JitKernel */ template @@ -378,100 +467,10 @@ class VIdentityKernelImpl : public VIdentityKernel { void Compute(const T* x, T* y) const override {} }; -/* VAddRelu JitKernel */ -template -class VAddReluKernelImpl : public VAddReluKernel { - public: - explicit VAddReluKernelImpl(int d) : VAddReluKernel() { this->num_ = d; } - void Compute(const T* x, const T* y, T* z) const override { - for (int i = 0; i < this->num_; ++i) { - z[i] = x[i] + y[i]; - z[i] = z[i] > 0 ? 
z[i] : 0; - } - } -}; - -#define INTRI8_FLOAT(isa) \ - template <> \ - void VAddReluKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 tmpx = _mm256_loadu_ps(x); \ - __m256 tmpy = _mm256_loadu_ps(y); \ - tmpy = _mm256_add_ps(tmpx, tmpy); \ - tmpy = _mm256_max_ps(tmpy, _mm256_setzero_ps()); \ - _mm256_storeu_ps(z, tmpy); \ - } - -#define INTRI16_FLOAT(isa) \ - template <> \ - void VAddReluKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 zeros = _mm256_setzero_ps(); \ - __m256 tmp0 = _mm256_loadu_ps(x); \ - __m256 tmp1 = _mm256_loadu_ps(y); \ - tmp0 = _mm256_add_ps(tmp0, tmp1); \ - tmp0 = _mm256_max_ps(tmp0, zeros); \ - tmp1 = _mm256_loadu_ps(x + 8); \ - __m256 tmp2 = _mm256_loadu_ps(y + 8); \ - tmp1 = _mm256_add_ps(tmp1, tmp2); \ - tmp1 = _mm256_max_ps(tmp1, zeros); \ - _mm256_storeu_ps(z, tmp0); \ - _mm256_storeu_ps(z + 8, tmp1); \ - } - -#define INTRI_COMMON_FLOAT(isa, block) \ - template <> \ - VAddReluKernelImpl::VAddReluKernelImpl(int d) \ - : VAddReluKernel() { \ - this->num_ = d; \ - this->end_ = d - d % AVX_FLOAT_BLOCK; \ - this->rest_ = d - this->end_; \ - } \ - template <> \ - void VAddReluKernelImpl::Compute( \ - const float* x, const float* y, float* z) const { \ - __m256 zeros = _mm256_setzero_ps(); \ - for (int i = 0; i < this->end_; i += AVX_FLOAT_BLOCK) { \ - __m256 tmpx = _mm256_loadu_ps(x + i); \ - __m256 tmpy = _mm256_loadu_ps(y + i); \ - tmpy = _mm256_add_ps(tmpx, tmpy); \ - tmpy = _mm256_max_ps(tmpy, zeros); \ - _mm256_storeu_ps(z + i, tmpy); \ - } \ - for (int i = this->end_; i < this->num_; ++i) { \ - z[i] = x[i] + y[i]; \ - z[i] = z[i] > 0 ? z[i] : 0; \ - } \ - } - -#ifdef __AVX__ -INTRI8_FLOAT(jit::avx); -INTRI16_FLOAT(jit::avx); -INTRI_COMMON_FLOAT(jit::avx, kGT16); -#endif -#ifdef __AVX2__ -INTRI8_FLOAT(jit::avx2); -INTRI16_FLOAT(jit::avx2); -INTRI_COMMON_FLOAT(jit::avx2, kGT16); -#endif -#ifdef __AVX512F__ -// TODO(TJ): refine avx512 -INTRI8_FLOAT(jit::avx512f); -INTRI16_FLOAT(jit::avx512f); -INTRI_COMMON_FLOAT(jit::avx512f, kGT16); -#endif - -#undef INTRI8_FLOAT -#undef INTRI16_FLOAT -#undef INTRI_COMMON_FLOAT - -REGISTER_JITKERNEL(vmul, VMulKernel); -REGISTER_JITKERNEL(vadd, VAddKernel); -REGISTER_JITKERNEL(vscal, VScalKernel); -REGISTER_JITKERNEL(vaddb, VAddBiasKernel); -REGISTER_JITKERNEL(vrelu, VReluKernel); -REGISTER_JITKERNEL(vaddrelu, VAddReluKernel); -REGISTER_JITKERNEL(videntity, VIdentityKernel); +REGISTER_JITKERNEL_DEPRECATED(vscal, VScalKernel); +REGISTER_JITKERNEL_DEPRECATED(vaddb, VAddBiasKernel); +REGISTER_JITKERNEL_DEPRECATED(vrelu, VReluKernel); +REGISTER_JITKERNEL_DEPRECATED(videntity, VIdentityKernel); } // namespace jitkernel } // namespace math diff --git a/paddle/fluid/operators/math/jit_kernel_crf_decode.cc b/paddle/fluid/operators/math/jit_kernel_crf_decode.cc index e481d1921a7dc4fd6da3fffbc3959eafa7b4b461..a4861c347e44ad86a066861d3375b556302a84bc 100644 --- a/paddle/fluid/operators/math/jit_kernel_crf_decode.cc +++ b/paddle/fluid/operators/math/jit_kernel_crf_decode.cc @@ -288,7 +288,7 @@ INTRIAVX512_FLOAT(kGT16); #undef INIT_ALPHA #undef UPDATE_ALPHA -REGISTER_JITKERNEL(crf_decode, CRFDecodeKernel); +REGISTER_JITKERNEL_DEPRECATED(crf_decode, CRFDecodeKernel); } // namespace jitkernel } // namespace math diff --git a/paddle/fluid/operators/math/jit_kernel_exp.cc b/paddle/fluid/operators/math/jit_kernel_exp.cc index c4247580f491a7ca26259528ca74dd92e35785a9..d7c177e6782e19e199542e10e1d62587ee0df4cf 100644 --- a/paddle/fluid/operators/math/jit_kernel_exp.cc +++ 
b/paddle/fluid/operators/math/jit_kernel_exp.cc @@ -250,7 +250,7 @@ INTRI16_FLOAT(jit::avx512f, detail::ExpAVX2); #undef MKL_FLOAT #undef MKL_DOUBLE -REGISTER_JITKERNEL(vexp, VExpKernel); +REGISTER_JITKERNEL_DEPRECATED(vexp, VExpKernel); /* VSigmoid JitKernel */ template @@ -396,7 +396,7 @@ INTRI16_FLOAT(jit::avx512f, detail::ExpAVX2); #undef INTRI_GT16_FLOAT #undef INTRI_VSIGMOID -REGISTER_JITKERNEL(vsigmoid, VSigmoidKernel); +REGISTER_JITKERNEL_DEPRECATED(vsigmoid, VSigmoidKernel); /* VTanh JitKernel */ template @@ -531,7 +531,7 @@ INTRI16_FLOAT(jit::avx512f, detail::ExpAVX2); #undef INTRI_GT16_FLOAT #undef INTRI_VTANH -REGISTER_JITKERNEL(vtanh, VTanhKernel); +REGISTER_JITKERNEL_DEPRECATED(vtanh, VTanhKernel); #undef JITKERNEL_NEW_ACT_IMPL diff --git a/paddle/fluid/operators/math/jit_kernel_macro.h b/paddle/fluid/operators/math/jit_kernel_macro.h index d8e55f2673560ff6afa34376b73275b57a8ceea1..a8169ea48ae3eee5a8cba291be4496c4c6074221 100644 --- a/paddle/fluid/operators/math/jit_kernel_macro.h +++ b/paddle/fluid/operators/math/jit_kernel_macro.h @@ -21,8 +21,71 @@ namespace operators { namespace math { namespace jitkernel { -namespace jit = platform::jit; +#define JITKERNEL_DEFINE_NAME(ker_key, ker_class) \ + template <> \ + std::string ker_class##Impl::name(int d) { \ + std::string key(#ker_key "f"); \ + if (useJIT(d)) { \ + /* only jit code need record d*/ \ + return key + "jit" + std::to_string(d); \ + } else if (useMKL(d)) { \ + return key + "mkl"; \ + } else { \ + return key + "any"; \ + } \ + } \ + template <> \ + std::string ker_class##Impl::name(int d) { \ + std::string key(#ker_key "d"); \ + /* jit code do not support double yet*/ \ + if (useMKL(d)) { \ + return key + "mkl"; \ + } else { \ + return key + "any"; \ + } \ + } + +#define JITKERNEL_DECLARE(ker_class, ker_dtype) \ + template <> \ + std::shared_ptr> \ + KernelPool::Get, int>(int d) + +#define JITKERNEL_FIND_KEY(ker_class, ker_dtype) \ + std::string key = ker_class##Impl::name(d) + +#define JITKERNEL_IMPL(ker_class, ker_dtype) \ + p = std::dynamic_pointer_cast>( \ + std::make_shared>(d)) + +#define REGISTER_JITKERNEL_WITH_DTYPE(ker_class, ker_dtype, marco_declare, \ + macro_find_key, macro_impl) \ + marco_declare(ker_class, ker_dtype) { \ + macro_find_key(ker_class, ker_dtype); \ + if (kers_.find(key) == kers_.end()) { \ + std::shared_ptr> p; \ + macro_impl(ker_class, ker_dtype); \ + kers_.insert({key, std::dynamic_pointer_cast(p)}); \ + return p; \ + } \ + return std::dynamic_pointer_cast>( \ + kers_.at(key)); \ + } +#define REGISTER_JITKERNEL_ARGS(ker_key, ker_class, marco_define_name, \ + marco_declare, macro_find_key, macro_impl) \ + marco_define_name(ker_key, ker_class); \ + REGISTER_JITKERNEL_WITH_DTYPE(ker_class, float, JITKERNEL_DECLARE, \ + JITKERNEL_FIND_KEY, JITKERNEL_IMPL); \ + REGISTER_JITKERNEL_WITH_DTYPE(ker_class, double, JITKERNEL_DECLARE, \ + JITKERNEL_FIND_KEY, JITKERNEL_IMPL) + +#define REGISTER_JITKERNEL(ker_key, ker_class) \ + REGISTER_JITKERNEL_ARGS(ker_key, ker_class, JITKERNEL_DEFINE_NAME, \ + JITKERNEL_DECLARE, JITKERNEL_FIND_KEY, \ + JITKERNEL_IMPL) + +namespace jit = platform::jit; +// TODO(TJ): below defines are deprecated, would be remove recently #define SEARCH_BLOCK(macro_, ker, dtype, isa) \ if (d < AVX_FLOAT_BLOCK) { \ macro_(ker, dtype, isa, kLT8); \ @@ -47,44 +110,42 @@ namespace jit = platform::jit; SEARCH_BLOCK(macro_, ker, dtype, jit::isa_any); \ } -#define JITKERNEL_DECLARE(ker_class, ker_dtype) \ - template <> \ - std::shared_ptr> \ - KernelPool::Get, int>(int d) - #define 
JITKERNEL_KEY(ker_key, dtype_key) \ #ker_key #dtype_key + std::to_string(d) -#define JITKERNEL_NEW_IMPL(ker, dtype, isa, k) \ - p = std::dynamic_pointer_cast>( \ +#define JITKERNEL_NEW_IMPL_DEPRECATED(ker, dtype, isa, k) \ + p = std::dynamic_pointer_cast>( \ std::make_shared>(d)) -#define JITKERNEL_WITH_DTYPE(ker_key, ker_class, ker_dtype, dtype_key, \ - marco_declare, macro_key, macro_impl) \ - marco_declare(ker_class, ker_dtype) { \ - std::string key = macro_key(ker_key, dtype_key); \ - if (kers_.find(key) == kers_.end()) { \ - std::shared_ptr> p; \ - SEARCH_ISA_BLOCK(macro_impl, ker_class, ker_dtype); \ - kers_.insert({key, std::dynamic_pointer_cast(p)}); \ - return p; \ - } \ - return std::dynamic_pointer_cast>( \ - kers_.at(key)); \ +#define JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, ker_dtype, \ + dtype_key, marco_declare, macro_key, \ + macro_impl) \ + marco_declare(ker_class, ker_dtype) { \ + std::string key = macro_key(ker_key, dtype_key); \ + if (kers_.find(key) == kers_.end()) { \ + std::shared_ptr> p; \ + SEARCH_ISA_BLOCK(macro_impl, ker_class, ker_dtype); \ + kers_.insert({key, std::dynamic_pointer_cast(p)}); \ + return p; \ + } \ + return std::dynamic_pointer_cast>( \ + kers_.at(key)); \ } -#define REGISTER_JITKERNEL(ker_key, ker_class) \ - JITKERNEL_WITH_DTYPE(ker_key, ker_class, float, f, JITKERNEL_DECLARE, \ - JITKERNEL_KEY, JITKERNEL_NEW_IMPL); \ - JITKERNEL_WITH_DTYPE(ker_key, ker_class, double, d, JITKERNEL_DECLARE, \ - JITKERNEL_KEY, JITKERNEL_NEW_IMPL) - -#define REGISTER_JITKERNEL_ARGS(ker_key, ker_class, marco_declare, macro_key, \ - macro_impl) \ - JITKERNEL_WITH_DTYPE(ker_key, ker_class, float, f, marco_declare, macro_key, \ - macro_impl); \ - JITKERNEL_WITH_DTYPE(ker_key, ker_class, double, d, marco_declare, \ - macro_key, macro_impl) +#define REGISTER_JITKERNEL_DEPRECATED(ker_key, ker_class) \ + JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, float, f, \ + JITKERNEL_DECLARE, JITKERNEL_KEY, \ + JITKERNEL_NEW_IMPL_DEPRECATED); \ + JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, double, d, \ + JITKERNEL_DECLARE, JITKERNEL_KEY, \ + JITKERNEL_NEW_IMPL_DEPRECATED) + +#define REGISTER_JITKERNEL_ARGS_DEPRECATED(ker_key, ker_class, marco_declare, \ + macro_key, macro_impl) \ + JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, float, f, marco_declare, \ + macro_key, macro_impl); \ + JITKERNEL_WITH_DTYPE_DEPRECATED(ker_key, ker_class, double, d, \ + marco_declare, macro_key, macro_impl) #define FOR_EACH_ISA(macro_, block) \ macro_(jit::avx512f, block); \ diff --git a/paddle/fluid/operators/math/jit_kernel_rnn.cc b/paddle/fluid/operators/math/jit_kernel_rnn.cc index fab293f7d03eb923995fa4cd99af955a34faa6a4..ba3e917377cf12192a068a9d71238442e12d5e5e 100644 --- a/paddle/fluid/operators/math/jit_kernel_rnn.cc +++ b/paddle/fluid/operators/math/jit_kernel_rnn.cc @@ -179,23 +179,23 @@ class LSTMKernelImpl : public LSTMKernel { /* C_t = C_t-1 * fgated + cand_gated * igated */ act_cand_d_->Compute(gates, gates); - vmul_d_->Compute(gates, gates + d_, gates + d_); - vmul_d_->Compute(ct_1, gates + d2_, gates + d2_); - vadd_d_->Compute(gates + d_, gates + d2_, ct); + vmul_d_->Compute(gates, gates + d_, gates + d_, d_); + vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_); + vadd_d_->Compute(gates + d_, gates + d2_, ct, d_); /* H_t = act_cell(C_t) * ogated */ act_cell_d_->Compute(ct, gates + d2_); - vmul_d_->Compute(gates + d2_, gates + d3_, ht); + vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } void ComputeC1H1(T* gates, T* ct, T* ht, const T* wp_data) const 
override { /* C_t = igated * cgated*/ act_gate_d_->Compute(gates + d_, gates + d_); act_cand_d_->Compute(gates, gates); - vmul_d_->Compute(gates, gates + d_, ct); + vmul_d_->Compute(gates, gates + d_, ct, d_); /* H_t = act_cell(C_t) * ogated */ act_gate_d_->Compute(gates + d3_, gates + d3_); act_cell_d_->Compute(ct, gates + d2_); - vmul_d_->Compute(gates + d2_, gates + d3_, ht); + vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } private: @@ -289,36 +289,36 @@ class PeepholeKernelImpl : public LSTMKernel { void ComputeCtHt(T* gates, const T* ct_1, T* ct, T* ht, const T* wp_data, T* checked) const override { /* get fgated and igated*/ - vmul_d_->Compute(wp_data, ct_1, checked); - vmul_d_->Compute(wp_data + d_, ct_1, checked + d_); - vadd_d2_->Compute(checked, gates + d_, gates + d_); + vmul_d_->Compute(wp_data, ct_1, checked, d_); + vmul_d_->Compute(wp_data + d_, ct_1, checked + d_, d_); + vadd_d2_->Compute(checked, gates + d_, gates + d_, d2_); act_gate_d2_->Compute(gates + d_, gates + d_); /* C_t = C_t-1 * fgated + cand_gated * igated*/ act_cand_d_->Compute(gates, gates); - vmul_d_->Compute(gates, gates + d_, gates + d_); - vmul_d_->Compute(ct_1, gates + d2_, gates + d2_); - vadd_d_->Compute(gates + d_, gates + d2_, ct); + vmul_d_->Compute(gates, gates + d_, gates + d_, d_); + vmul_d_->Compute(ct_1, gates + d2_, gates + d2_, d_); + vadd_d_->Compute(gates + d_, gates + d2_, ct, d_); /* get ogated*/ - vmul_d_->Compute(wp_data + d2_, ct, gates + d_); - vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_); + vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_); + vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_, d_); act_gate_d_->Compute(gates + d3_, gates + d3_); /* H_t = act_cell(C_t) * ogated */ act_cell_d_->Compute(ct, gates + d2_); - vmul_d_->Compute(gates + d2_, gates + d3_, ht); + vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } void ComputeC1H1(T* gates, T* ct, T* ht, const T* wp_data) const override { /* C_t = igated * cgated*/ act_gate_d_->Compute(gates + d_, gates + d_); act_cand_d_->Compute(gates, gates); - vmul_d_->Compute(gates, gates + d_, ct); + vmul_d_->Compute(gates, gates + d_, ct, d_); /* get outgated, put W_oc * C_t on igated */ - vmul_d_->Compute(wp_data + d2_, ct, gates + d_); - vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_); + vmul_d_->Compute(wp_data + d2_, ct, gates + d_, d_); + vadd_d_->Compute(gates + d_, gates + d3_, gates + d3_, d_); /* H_t = act_cell(C_t) * ogated */ act_gate_d_->Compute(gates + d3_, gates + d3_); act_cell_d_->Compute(ct, gates + d2_); - vmul_d_->Compute(gates + d2_, gates + d3_, ht); + vmul_d_->Compute(gates + d2_, gates + d3_, ht, d_); } private: @@ -352,8 +352,8 @@ class PeepholeKernelImpl : public LSTMKernel { act_cell, d)); \ } -REGISTER_JITKERNEL_ARGS(lstm, LSTMKernel, JITKERNEL_DECLARE_LSTM, - JITKERNEL_KEY_LSTM, JITKERNEL_NEW_LSTM_IMPL); +REGISTER_JITKERNEL_ARGS_DEPRECATED(lstm, LSTMKernel, JITKERNEL_DECLARE_LSTM, + JITKERNEL_KEY_LSTM, JITKERNEL_NEW_LSTM_IMPL); #undef INTRI8_FLOAT #undef JITKERNEL_DECLARE_LSTM @@ -378,13 +378,13 @@ class GRUKernelImpl : public GRUKernel { void ComputeH1(T* gates, T* ht) const override { act_gate_d_->Compute(gates, gates); act_state_d_->Compute(gates + d2_, gates + d2_); - vmul_d_->Compute(gates, gates + d2_, ht); + vmul_d_->Compute(gates, gates + d2_, ht, d_); } void ComputeHtPart1(T* gates, const T* ht_1, T* ht) const override { // W: {W_update, W_reset; W_state} act_gate_d2_->Compute(gates, gates); - vmul_d_->Compute(ht_1, gates + d_, ht); + vmul_d_->Compute(ht_1, gates + d_, ht, 
d_); } void ComputeHtPart2(T* gates, const T* ht_1, T* ht) const override { @@ -472,8 +472,8 @@ INTRI8_FLOAT(jit::avx512f); p = std::dynamic_pointer_cast>( \ std::make_shared>(act_gate, act_state, d)); -REGISTER_JITKERNEL_ARGS(gru, GRUKernel, JITKERNEL_DECLARE_GRU, - JITKERNEL_KEY_GRU, JITKERNEL_NEW_GRU_IMPL); +REGISTER_JITKERNEL_ARGS_DEPRECATED(gru, GRUKernel, JITKERNEL_DECLARE_GRU, + JITKERNEL_KEY_GRU, JITKERNEL_NEW_GRU_IMPL); #undef INTRI8_FLOAT #undef JITKERNEL_NEW_GRU_IMPL diff --git a/paddle/fluid/operators/math/jit_kernel_test.cc b/paddle/fluid/operators/math/jit_kernel_test.cc index c9e6ab740da4e39de3dc41f9df352b88e696c38d..9a19424691fad70c161ca6036c5cdfd3b2b22ada 100644 --- a/paddle/fluid/operators/math/jit_kernel_test.cc +++ b/paddle/fluid/operators/math/jit_kernel_test.cc @@ -369,12 +369,12 @@ void lstm_ctht_better( int d2 = d * 2; vsigmoid_3d->Compute(gates + d, gates + d); vtanh_d->Compute(gates, gates); - vmul_d->Compute(gates, gates + d, gates + d); - vmul_d->Compute(ct_1, gates + d2, gates + d2); - vadd_d->Compute(gates + d, gates + d2, ct); + vmul_d->Compute(gates, gates + d, gates + d, d); + vmul_d->Compute(ct_1, gates + d2, gates + d2, d); + vadd_d->Compute(gates + d, gates + d2, ct, d); /* H_t = act_cell(C_t) * ogated */ vtanh_d->Compute(ct, gates + d2); - vmul_d->Compute(gates + d2, gates + d * 3, ht); + vmul_d->Compute(gates + d2, gates + d * 3, ht, d); } TEST(JitKernel, lstm) { @@ -578,7 +578,7 @@ void vmul_mkl(const int n, const float* x, const float* y, float* z) { TEST(JitKernel, vmul) { namespace jit = paddle::operators::math::jitkernel; - for (int d : {7, 8, 15, 16, 30, 256, 512}) { + for (int d : {7, 8, 15, 16, 20, 30, 256, 512, 1000, 1024}) { std::vector x(d), y(d); std::vector zref(d), ztgt(d); RandomVec(d, x.data()); @@ -616,7 +616,7 @@ TEST(JitKernel, vmul) { auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->Compute(x_data, y_data, ztgt_data); + ker->Compute(x_data, y_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); @@ -695,7 +695,7 @@ TEST(JitKernel, vadd) { auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->Compute(x_data, y_data, ztgt_data); + ker->Compute(x_data, y_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); @@ -723,8 +723,8 @@ void vaddrelu_better( const paddle::operators::math::jitkernel::VAddKernel>& vadd, const std::shared_ptr< const paddle::operators::math::jitkernel::VReluKernel>& vrelu, - const float* x, const float* y, float* z) { - vadd->Compute(x, y, z); + const float* x, const float* y, float* z, int d) { + vadd->Compute(x, y, z, d); vrelu->Compute(z, z); } @@ -752,12 +752,12 @@ TEST(JitKernel, vaddrelu) { auto trefe = GetCurrentUS(); auto tmkls = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - vaddrelu_better(vadd, vrelu, x_data, y_data, zref_data); + vaddrelu_better(vadd, vrelu, x_data, y_data, zref_data, d); } auto tmkle = GetCurrentUS(); auto ttgts = GetCurrentUS(); for (int i = 0; i < repeat; ++i) { - ker->Compute(x_data, y_data, ztgt_data); + ker->Compute(x_data, y_data, ztgt_data, d); } auto ttgte = GetCurrentUS(); VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat @@ -800,8 +800,12 @@ TEST(JitKernel, pool) { EXPECT_TRUE(std::dynamic_pointer_cast(pvmul_f) != std::dynamic_pointer_cast(pvmul_d)); - const auto& pvmul_from_key = jit::KernelPool::Instance().Get("vmulf4"); - EXPECT_EQ(pvmul_f, pvmul_from_key); - const auto& pvmul_from_key2 = jit::KernelPool::Instance().Get("vmulf5"); + const auto& pvmul_from_key = jit::KernelPool::Instance().Get("vmulfjit4"); 
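// NOTE (editorial, not part of the original patch): with the new
// JITKERNEL_DEFINE_NAME macro the pool key encodes the backend chosen by
// name(int d), e.g. for VMulKernel<float> with d = 4:
//   "vmulf" + "jit" + "4" -> "vmulfjit4"  (xbyak JIT code, size-specific)
//   "vmulf" + "mkl"       -> "vmulfmkl"   (MKL VML, shared across sizes)
//   "vmulf" + "any"       -> "vmulfany"   (reference loop, shared)
// The old size-only key "vmulf4" therefore no longer exists, and on
// platforms built without xbyak (Apple/Windows below) the JIT key is never
// registered, hence the nullptr expectation.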
+#if defined(__APPLE__) || defined(__OSX__) || defined(_WIN32) + EXPECT_EQ(pvmul_from_key, nullptr); +#else + EXPECT_EQ(pvmul_from_key, pvmul_f); +#endif + const auto& pvmul_from_key2 = jit::KernelPool::Instance().Get("vmulfjit"); EXPECT_TRUE(pvmul_from_key2 == nullptr); } diff --git a/paddle/fluid/operators/math/pooling.cc b/paddle/fluid/operators/math/pooling.cc index b871851798e48e6b598cb4ab8e2e42db478a3820..8df43bb616179e2487534e0acabb71b09b87e1af 100644 --- a/paddle/fluid/operators/math/pooling.cc +++ b/paddle/fluid/operators/math/pooling.cc @@ -31,7 +31,7 @@ class Pool2dFunctor { const framework::Tensor& input, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* output) { + bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -68,7 +68,8 @@ class Pool2dFunctor { pool_process.compute(input_data[h * input_width + w], &ele); } } - int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = exclusive ? (hend - hstart) * (wend - wstart) + : ksize_height * ksize_width; pool_process.finalize(static_cast(pool_size), &ele); output_data[ph * output_width + pw] = ele; } @@ -93,7 +94,7 @@ class Pool2dGradFunctor { const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_grad_process, - framework::Tensor* input_grad) { + bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -124,7 +125,8 @@ class Pool2dGradFunctor { int wstart = pw * stride_width - padding_width; int wend = std::min(wstart + ksize_width, input_width); wstart = std::max(wstart, 0); - int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = exclusive ? (hend - hstart) * (wend - wstart) + : ksize_height * ksize_width; float scale = 1.0 / pool_size; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { @@ -249,7 +251,7 @@ class Pool3dFunctor { const framework::Tensor& input, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* output) { + bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -300,7 +302,9 @@ class Pool3dFunctor { } } int pool_size = - (dend - dstart) * (hend - hstart) * (wend - wstart); + exclusive + ? (dend - dstart) * (hend - hstart) * (wend - wstart) + : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast(pool_size), &ele); output_data[output_idx] = ele; } @@ -326,7 +330,7 @@ class Pool3dGradFunctor { const framework::Tensor& output, const framework::Tensor& output_grad, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_grad_process, - framework::Tensor* input_grad) { + bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -369,7 +373,9 @@ class Pool3dGradFunctor { wstart = std::max(wstart, 0); int pool_size = - (dend - dstart) * (hend - hstart) * (wend - wstart); + exclusive + ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) + : ksize_depth * ksize_height * ksize_width; float scale = 1.0 / pool_size; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { diff --git a/paddle/fluid/operators/math/pooling.cu b/paddle/fluid/operators/math/pooling.cu index b1c76350d1724629bae175abf47e6671a1532242..a689eb42242e551caa3470f34f7e8d7e80b6dfbe 100644 --- a/paddle/fluid/operators/math/pooling.cu +++ b/paddle/fluid/operators/math/pooling.cu @@ -29,7 +29,7 @@ __global__ void KernelPool2D(const int nthreads, const T* input_data, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, PoolProcess pool_process, - T* output_data) { + bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; @@ -52,7 +52,8 @@ __global__ void KernelPool2D(const int nthreads, const T* input_data, pool_process.compute(input_data[h * input_width + w], &ele); } } - int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = exclusive ? (hend - hstart) * (wend - wstart) + : ksize_height * ksize_width; pool_process.finalize(static_cast(pool_size), &ele); output_data[index] = ele; } @@ -65,7 +66,7 @@ __global__ void KernelPool2DGrad( const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, - PoolProcess pool_process, T* input_grad) { + PoolProcess pool_process, bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; @@ -95,7 +96,8 @@ __global__ void KernelPool2DGrad( int wend = min(wstart + ksize_width, input_width); hstart = max(hstart, 0); wstart = max(wstart, 0); - int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = exclusive ? 
(hend - hstart) * (wend - wstart) + : ksize_height * ksize_width; int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], @@ -163,7 +165,7 @@ class Pool2dFunctor { const framework::Tensor& input, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* output) { + bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -189,7 +191,8 @@ class Pool2dFunctor { KernelPool2D<<>>( nthreads, input_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, - stride_width, padding_height, padding_width, pool_process, output_data); + stride_width, padding_height, padding_width, pool_process, exclusive, + output_data); } }; @@ -208,7 +211,7 @@ class Pool2dGradFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* input_grad) { + bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -236,7 +239,7 @@ class Pool2dGradFunctor { nthreads, input_data, output_data, output_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, - pool_process, input_grad_data); + pool_process, exclusive, input_grad_data); } }; @@ -313,16 +316,14 @@ template class Pool2dGradFunctor; template -__global__ void KernelPool3D(const int nthreads, const T* input_data, - const int channels, const int input_depth, - const int input_height, const int input_width, - const int output_depth, const int output_height, - const int output_width, const int ksize_depth, - const int ksize_height, const int ksize_width, - const int stride_depth, const int stride_height, - const int stride_width, const int padding_depth, - const int padding_height, const int padding_width, - PoolProcess pool_process, T* output_data) { +__global__ void KernelPool3D( + const int nthreads, const T* input_data, const int channels, + const int input_depth, const int input_height, const int input_width, + const int output_depth, const int output_height, const int output_width, + const int ksize_depth, const int ksize_height, const int ksize_width, + const int stride_depth, const int stride_height, const int stride_width, + const int padding_depth, const int padding_height, const int padding_width, + PoolProcess pool_process, bool exclusive, T* output_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; @@ -351,7 +352,9 @@ __global__ void KernelPool3D(const int nthreads, const T* input_data, } } } - int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + int pool_size = exclusive + ? 
(dend - dstart) * (hend - hstart) * (wend - wstart) + : ksize_depth * ksize_height * ksize_width; pool_process.finalize(static_cast(pool_size), &ele); output_data[index] = ele; } @@ -366,7 +369,7 @@ __global__ void KernelPool3DGrad( const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, PoolProcess pool_process, - T* input_grad) { + bool exclusive, T* input_grad) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; @@ -409,7 +412,9 @@ __global__ void KernelPool3DGrad( dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); - int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + int pool_size = + exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart) + : ksize_depth * ksize_height * ksize_width; int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], @@ -484,7 +489,7 @@ class Pool3dFunctor { const framework::Tensor& input, const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* output) { + bool exclusive, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -517,7 +522,7 @@ class Pool3dFunctor { nthreads, input_data, input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, - padding_depth, padding_height, padding_width, pool_process, + padding_depth, padding_height, padding_width, pool_process, exclusive, output_data); } }; @@ -537,7 +542,7 @@ class Pool3dGradFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_process, - framework::Tensor* input_grad) { + bool exclusive, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -573,7 +578,7 @@ class Pool3dGradFunctor { input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, - padding_width, pool_process, input_grad_data); + padding_width, pool_process, exclusive, input_grad_data); } }; diff --git a/paddle/fluid/operators/math/pooling.h b/paddle/fluid/operators/math/pooling.h index 120f5919803806e0d3b7dc8eaf530ae89819b84d..0f64e321bf01eea69767af020ed8c1a75e31acb5 100644 --- a/paddle/fluid/operators/math/pooling.h +++ b/paddle/fluid/operators/math/pooling.h @@ -89,7 +89,7 @@ class Pool2dFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_compute, - framework::Tensor* output); + bool exclusive, framework::Tensor* output); }; template @@ -101,7 +101,7 @@ class Pool2dGradFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_compute, - framework::Tensor* input_grad); + bool exclusive, framework::Tensor* input_grad); }; template @@ -123,7 +123,7 @@ class Pool3dFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, 
PoolProcess pool_compute, - framework::Tensor* output); + bool exclusive, framework::Tensor* output); }; template @@ -135,7 +135,7 @@ class Pool3dGradFunctor { const std::vector& ksize, const std::vector& strides, const std::vector& paddings, PoolProcess pool_compute, - framework::Tensor* input_grad); + bool exclusive, framework::Tensor* input_grad); }; template diff --git a/paddle/fluid/operators/math/selected_rows_functor.cu b/paddle/fluid/operators/math/selected_rows_functor.cu index 10f39822b9c904ce236a1a2a3806d70693bd2e63..c4fccdbf862fda8a599869c30ae598573ca367aa 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cu +++ b/paddle/fluid/operators/math/selected_rows_functor.cu @@ -18,6 +18,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { @@ -80,7 +81,7 @@ template __global__ void SelectedRowsAddTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { - const int ty = blockIdx.y; + const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; @@ -118,11 +119,11 @@ struct SelectedRowsAddTensor { auto* out_data = output->data(); SetConstant functor; - functor(context, output, 0.0); + functor(context, output, static_cast(0)); const int block_size = 256; dim3 threads(block_size, 1); - dim3 grid(1, in1_rows.size()); + dim3 grid(in1_rows.size(), 1); SelectedRowsAddTensorKernel< T, block_size><<>>( in1_data, in1_rows.CUDAData(context.GetPlace()), out_data, @@ -136,6 +137,9 @@ struct SelectedRowsAddTensor { template struct SelectedRowsAddTensor; template struct SelectedRowsAddTensor; +template struct SelectedRowsAdd; +template struct SelectedRowsAddTensor; template struct SelectedRowsAddTo { @@ -175,6 +179,8 @@ template struct SelectedRowsAddTo; template struct SelectedRowsAddTo; template struct SelectedRowsAddTo; template struct SelectedRowsAddTo; +template struct SelectedRowsAddTo; namespace { template @@ -182,7 +188,7 @@ __global__ void SelectedRowsAddToTensorKernel(const T* selected_rows, const int64_t* rows, T* tensor_out, int64_t row_numel) { - const int ty = blockIdx.y; + const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; @@ -215,7 +221,7 @@ struct SelectedRowsAddToTensor { auto* in2_data = input2->data(); const int block_size = 256; dim3 threads(block_size, 1); - dim3 grid(1, in1_rows.size()); + dim3 grid(in1_rows.size(), 1); SelectedRowsAddToTensorKernel< T, block_size><<>>( in1_data, in1_rows.CUDAData(context.GetPlace()), in2_data, @@ -227,6 +233,8 @@ template struct SelectedRowsAddToTensor; template struct SelectedRowsAddToTensor; template struct SelectedRowsAddToTensor; template struct SelectedRowsAddToTensor; +template struct SelectedRowsAddToTensor; namespace scatter { @@ -287,7 +295,7 @@ struct MergeAdd { context.GetPlace()); math::SetConstant constant_functor; - constant_functor(context, out.mutable_value(), 0.0); + constant_functor(context, out.mutable_value(), static_cast(0)); auto* out_data = out.mutable_value()->data(); auto* input_data = input.value().data(); @@ -347,7 +355,7 @@ struct MergeAdd { context.GetPlace()); math::SetConstant constant_functor; - constant_functor(context, out.mutable_value(), 0.0); + constant_functor(context, out.mutable_value(), static_cast(0)); auto* out_data = out.mutable_value()->data(); @@ -374,12 +382,13 
@@ template struct MergeAdd; template struct MergeAdd; template struct MergeAdd; template struct MergeAdd; +template struct MergeAdd; template __global__ void UpdateToTensorKernel(const T* selected_rows, const int64_t* rows, const ScatterOps& op, T* tensor_out, int64_t row_numel) { - const int ty = blockIdx.y; + const int ty = blockIdx.x; int tid = threadIdx.x; selected_rows += ty * row_numel; @@ -448,7 +457,7 @@ struct UpdateToTensor { auto* in2_data = input2->data(); dim3 threads(platform::PADDLE_CUDA_NUM_THREADS, 1); - dim3 grid(1, in1_rows.size()); + dim3 grid(in1_rows.size(), 1); UpdateToTensorKernel<<< grid, threads, 0, context.stream()>>>(in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel); diff --git a/paddle/fluid/operators/math/selected_rows_functor.h b/paddle/fluid/operators/math/selected_rows_functor.h index 521c53dd0d71707c13c4364c5ee59943a03d4a2d..b24ffb57acd4f4ef4430a1bdff30d2f96e349803 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.h +++ b/paddle/fluid/operators/math/selected_rows_functor.h @@ -88,57 +88,6 @@ struct MergeAdd { framework::SelectedRows* output); }; -template -struct Add { - framework::SelectedRows operator()(const DeviceContext& context, - const framework::SelectedRows& input1, - const framework::SelectedRows& input2) { - framework::SelectedRows out; - out.set_rows(input1.rows()); - out.set_height(input1.height()); - out.mutable_value()->mutable_data(input1.value().dims(), - context.GetPlace()); - auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); - auto e_in1 = framework::EigenVector::Flatten(input1.value()); - auto e_in2 = framework::EigenVector::Flatten(input2.value()); - e_out.device(*context.eigen_device()) = e_in1 + e_in2; - return out; - } -}; - -template -struct Mul { - // multiply two SelectedRows - framework::SelectedRows operator()(const DeviceContext& context, - const framework::SelectedRows& input1, - const framework::SelectedRows& input2) { - framework::SelectedRows out; - out.set_rows(input1.rows()); - out.set_height(input1.height()); - out.mutable_value()->mutable_data(input1.value().dims(), - context.GetPlace()); - auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); - auto e_in1 = framework::EigenVector::Flatten(input1.value()); - auto e_in2 = framework::EigenVector::Flatten(input2.value()); - e_out.device(*context.eigen_device()) = e_in1 * e_in2; - return out; - } - // multiply scalar to SelectedRows - framework::SelectedRows operator()(const DeviceContext& context, - const framework::SelectedRows& input1, - const T input2) { - framework::SelectedRows out; - out.set_rows(input1.rows()); - out.set_height(input1.height()); - out.mutable_value()->mutable_data(input1.value().dims(), - context.GetPlace()); - auto e_out = framework::EigenVector::Flatten(*(out.mutable_value())); - auto e_in1 = framework::EigenVector::Flatten(input1.value()); - e_out.device(*context.eigen_device()) = input2 * e_in1; - return out; - } -}; - enum class ScatterOps { ASSIGN, ADD, SUB, SUBBY, MUL, DIV, DIVBY }; // out = seleted_rows_in / tensor diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu index 3effe776258cb541dbba32f63eda457d917011f4..ce183ed3649055aab31eb6e3f44f2224475957e9 100644 --- a/paddle/fluid/operators/math/softmax.cu +++ b/paddle/fluid/operators/math/softmax.cu @@ -96,12 +96,15 @@ template class SoftmaxCUDNNFunctor; template class SoftmaxCUDNNFunctor; template class SoftmaxGradCUDNNFunctor; template class SoftmaxGradCUDNNFunctor; +template class 
SoftmaxGradCUDNNFunctor; template class SoftmaxFunctor; template class SoftmaxFunctor; template class SoftmaxFunctor; template class SoftmaxGradFunctor; template class SoftmaxGradFunctor; +template class SoftmaxGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/fluid/operators/mean_op.cu b/paddle/fluid/operators/mean_op.cu index 91e0ab28efc21d4376524c8ecf66b429d51d8847..413b8ace67bd0a36849373812950834523b62216 100644 --- a/paddle/fluid/operators/mean_op.cu +++ b/paddle/fluid/operators/mean_op.cu @@ -15,11 +15,15 @@ limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/fluid/operators/mean_op.h" +#include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( mean, ops::MeanKernel, - ops::MeanKernel); + ops::MeanKernel, + ops::MeanKernel); REGISTER_OP_CUDA_KERNEL( mean_grad, ops::MeanGradKernel, - ops::MeanGradKernel); + ops::MeanGradKernel, + ops::MeanGradKernel); diff --git a/paddle/fluid/operators/mean_op.h b/paddle/fluid/operators/mean_op.h index 362e9f9ae8b2f0f77198e3f3939211ae1117b27b..360b2f68a749f630d3c7ed009c16cb51ec150581 100644 --- a/paddle/fluid/operators/mean_op.h +++ b/paddle/fluid/operators/mean_op.h @@ -55,8 +55,7 @@ class MeanGradKernel : public framework::OpKernel { IG->mutable_data(context.GetPlace()); T ig_size = static_cast(IG->numel()); - Eigen::DSizes bcast(ig_size); - + Eigen::DSizes bcast(static_cast(ig_size)); EigenVector::Flatten(*IG).device( *context.template device_context().eigen_device()) = (EigenVector::From(*OG) / ig_size).broadcast(bcast); diff --git a/paddle/fluid/operators/mul_op.cu.cc b/paddle/fluid/operators/mul_op.cu.cc index 81f3e42bf412fa4d2cb48405f2f8ee49b6aa0b67..6c5a83c6a50c463502171f09bbf18e17e43917b5 100644 --- a/paddle/fluid/operators/mul_op.cu.cc +++ b/paddle/fluid/operators/mul_op.cu.cc @@ -20,6 +20,7 @@ namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(mul, ops::MulKernel, ops::MulKernel, ops::MulKernel); -REGISTER_OP_CUDA_KERNEL(mul_grad, - ops::MulGradKernel, - ops::MulGradKernel); +REGISTER_OP_CUDA_KERNEL( + mul_grad, ops::MulGradKernel, + ops::MulGradKernel, + ops::MulGradKernel); diff --git a/paddle/fluid/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc index 31f083565fddee66aea1485ed71f41b6199f4502..4a332ce10b59b21d2518684237ce0bbf1bbfa75a 100644 --- a/paddle/fluid/operators/pool_cudnn_op.cu.cc +++ b/paddle/fluid/operators/pool_cudnn_op.cu.cc @@ -41,6 +41,7 @@ class PoolCUDNNOpKernel : public framework::OpKernel { T *output_data = output->mutable_data(ctx.GetPlace()); std::string pooling_type = ctx.Attr("pooling_type"); + bool exclusive = ctx.Attr("exclusive"); std::vector ksize = ctx.Attr>("ksize"); std::vector strides = ctx.Attr>("strides"); std::vector paddings = ctx.Attr>("paddings"); @@ -72,7 +73,8 @@ class PoolCUDNNOpKernel : public framework::OpKernel { if (pooling_type == "max") { pooling_mode = PoolingMode::kMaximum; } else { - pooling_mode = PoolingMode::kAverage; + pooling_mode = exclusive ? 
PoolingMode::kAverageExclusive + : PoolingMode::kAverageInclusive; } cudnnPoolingDescriptor_t cudnn_pool_desc = @@ -101,6 +103,7 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel { Tensor *input_grad = ctx.Output(framework::GradVarName("X")); std::string pooling_type = ctx.Attr("pooling_type"); + bool exclusive = ctx.Attr("exclusive"); std::vector ksize = ctx.Attr>("ksize"); std::vector strides = ctx.Attr>("strides"); std::vector paddings = ctx.Attr>("paddings"); @@ -141,7 +144,8 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel { pooling_mode = PoolingMode::kMaximum; } } else { - pooling_mode = PoolingMode::kAverage; + pooling_mode = exclusive ? PoolingMode::kAverageExclusive + : PoolingMode::kAverageInclusive; } cudnnPoolingDescriptor_t cudnn_pool_desc = @@ -174,7 +178,8 @@ REGISTER_OP_KERNEL(pool2d, CUDNN, plat::CUDAPlace, ops::PoolCUDNNOpKernel); REGISTER_OP_KERNEL(pool2d_grad, CUDNN, plat::CUDAPlace, ops::PoolCUDNNGradOpKernel, - ops::PoolCUDNNGradOpKernel); + ops::PoolCUDNNGradOpKernel, + ops::PoolCUDNNGradOpKernel); REGISTER_OP_KERNEL(pool3d, CUDNN, plat::CUDAPlace, ops::PoolCUDNNOpKernel, diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index 24a5346b031008531fcefff0e6f1c31da33d1c3b..484cb65746612343fafc49fe61b607f2e919cf4f 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -180,6 +180,12 @@ void Pool2dOpMaker::Make() { "operator." "If global_pooling = true, paddings and ksize will be ignored.") .SetDefault({0, 0}); + AddAttr( + "exclusive", + "(bool, default True) When true, will exclude the zero-padding in the " + "averaging calculating, otherwise, include the zero-padding. Note, it " + "is only used when pooling_type is avg. The defalut is True.") + .SetDefault(true); AddAttr( "use_cudnn", "(bool, default false) Only used in cudnn kernel, need install cudnn") @@ -236,6 +242,23 @@ Example: W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1 $$ + For exclusive = true: + $$ + hstart = i * strides[0] - paddings[0] + hend = hstart + ksize[0] + wstart = j * strides[1] - paddings[1] + wend = wstart + ksize[1] + Output(i ,j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]} + $$ + For exclusive = false: + $$ + hstart = max(0, i * strides[0] - paddings[0]) + hend = min(H, hstart + ksize[0]) + wstart = max(0, j * strides[1] - paddings[1]) + wend = min(W, wstart + ksize[1]) + Output(i ,j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)} + $$ + )DOC"); } @@ -283,6 +306,12 @@ void Pool3dOpMaker::Make() { "If global_pooling = true, ksize and paddings will be ignored.") .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) + AddAttr( + "exclusive", + "(bool, default True) When true, will exclude the zero-padding in the " + "averaging calculating, otherwise, include the zero-padding. Note, it " + "is only used when pooling_type is avg. 
The defalut is True.") + .SetDefault(true); AddAttr( "use_cudnn", diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h index a63963ca926bb94ff99e5cfe6dbcb2b15075bcb8..c0594b7e3cc5602a44bb01951a22c2135ba5c7ce 100644 --- a/paddle/fluid/operators/pool_op.h +++ b/paddle/fluid/operators/pool_op.h @@ -69,6 +69,7 @@ class PoolKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); + bool exclusive = context.Attr("exclusive"); if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { paddings[i] = 0; @@ -84,7 +85,7 @@ class PoolKernel : public framework::OpKernel { pool2d_forward; paddle::operators::math::MaxPool pool_process; pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, - out); + true, out); } else if (pooling_type == "avg") { paddle::operators::math::Pool2dFunctor< @@ -92,7 +93,7 @@ class PoolKernel : public framework::OpKernel { pool2d_forward; paddle::operators::math::AvgPool pool_process; pool2d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, - out); + exclusive, out); } } break; case 3: { @@ -102,14 +103,14 @@ class PoolKernel : public framework::OpKernel { pool3d_forward; paddle::operators::math::MaxPool pool_process; pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, - out); + true, out); } else if (pooling_type == "avg") { paddle::operators::math::Pool3dFunctor< DeviceContext, paddle::operators::math::AvgPool, T> pool3d_forward; paddle::operators::math::AvgPool pool_process; pool3d_forward(dev_ctx, *in_x, ksize, strides, paddings, pool_process, - out); + exclusive, out); } } break; default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); } @@ -131,6 +132,7 @@ class PoolGradKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); + bool exclusive = context.Attr("exclusive"); if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { @@ -157,7 +159,7 @@ class PoolGradKernel : public framework::OpKernel { pool2d_backward; paddle::operators::math::AvgPoolGrad pool_process; pool2d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides, - paddings, pool_process, in_x_grad); + paddings, pool_process, exclusive, in_x_grad); } } break; case 3: { @@ -172,7 +174,7 @@ class PoolGradKernel : public framework::OpKernel { pool3d_backward; paddle::operators::math::AvgPoolGrad pool_process; pool3d_backward(dev_ctx, *in_x, *out, *out_grad, ksize, strides, - paddings, pool_process, in_x_grad); + paddings, pool_process, exclusive, in_x_grad); } } break; default: { PADDLE_THROW("Pool op only supports 2D and 3D input."); } diff --git a/paddle/fluid/operators/prefetch_op.cc b/paddle/fluid/operators/prefetch_op.cc index 0519c15e13aac99802ff0f95b975712b36b44246..490dfa41be2de987c51b7f06d988ce27980aa5f2 100644 --- a/paddle/fluid/operators/prefetch_op.cc +++ b/paddle/fluid/operators/prefetch_op.cc @@ -42,7 +42,8 @@ class PrefetchOp : public framework::OperatorBase { auto& ctx = *pool.Get(place); distributed::RPCClient* rpc_client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance( + Attr("trainer_id")); std::vector rets; for (size_t i = 0; i < ins.size(); i++) { @@ -69,6 +70,7 @@ class PrefetchOpMaker : public framework::OpProtoAndCheckerMaker { "(LoDTensor) result " "to be fetched from parameter 
server") .AsDuplicable(); + AddAttr("trainer_id", "trainer id from 0 ~ worker_num.").SetDefault(0); AddAttr>( "epmap", "(string vector, default 127.0.0.1:6164)" diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index a0d640b2020958af53a4405ae886eadb2a1e117e..a0b70938d354cbb3bf10a9c8c589ba5153624f45 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -33,6 +33,19 @@ class ReadInferShape : public framework::InferShapeBase { reader_dims.size(), out_names.size(), "The reader's dim number doesn't match the output number."); ctx->SetOutputsDim("Out", reader_dims); + if (!ctx->IsRuntime()) { + auto in_desc = + boost::get(ctx->GetInputVarPtrs("Reader")[0]); + auto in_lod_levels = in_desc->GetLoDLevels(); + auto out_var_ptrs = ctx->GetOutputVarPtrs("Out"); + PADDLE_ENFORCE_EQ(in_lod_levels.size(), out_var_ptrs.size(), + "LoDLevels of Input(Reader) must be the same as the " + "number of Outputs(Out)."); + for (size_t i = 0; i < out_var_ptrs.size(); ++i) { + auto* out_desc = boost::get(out_var_ptrs[i]); + out_desc->SetLoDLevel(in_lod_levels[i]); + } + } } }; diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index 4d34b8a1686efb1fc30020f0d27e9a3c3a6c0866..0399ff41007fbe10da8d53a05671eb0cfb475a5f 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -42,7 +42,8 @@ class RecvOp : public framework::OperatorBase { auto& ctx = *pool.Get(place); distributed::RPCClient* rpc_client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance( + Attr("trainer_id")); std::vector rets; for (size_t i = 0; i < outs.size(); i++) { @@ -73,6 +74,7 @@ This operator can get variables from server side. "Server endpoints in the order of input " "variables for mapping") .SetDefault({}); + AddAttr("trainer_id", "trainer id from 0 ~ worker_num.").SetDefault(0); AddAttr("sync_mode", "(int, default 0)" "sync recv or async recv.") diff --git a/paddle/fluid/operators/ref_by_trainer_id_op.cc b/paddle/fluid/operators/ref_by_trainer_id_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..6cb651af6dc3d8e301365968787c199acc4c60ee --- /dev/null +++ b/paddle/fluid/operators/ref_by_trainer_id_op.cc @@ -0,0 +1,79 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/ref_by_trainer_id_op.h" +#include + +namespace paddle { +namespace operators { + +class RefByTrainerIdOp : public framework::OperatorWithKernel { + public: + RefByTrainerIdOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInputs("X"), + "Input(X) of RefByTrainerIdOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("TrainerId"), + "Input(TrainerId) of RefByTrainerIdOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of RefByTrainerIdOp should not be null."); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("TrainerId").size(), 1, + "TrainerId should be a scalar."); + // Out's shape is determined at runtime. + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType( + ctx.MultiInput("X")[0]->type()), + ctx.GetPlace()); + } +}; + +class RefByTrainerIdOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor) Input tensor list.").AsDuplicable(); + AddInput("TrainerId", "(Tensor) Scalar int, the trainer id runtime value."); + AddOutput("Out", "(Tensor) Return one tensor reference of X[trainer_id]"); + AddComment(R"DOC( +**RefByTrainerId operator** + +Return a reference of a tensor, using trainer_id as the index to find from the input. + +$$Out = X[TrainerId]$$ +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP_WITHOUT_GRADIENT(ref_by_trainer_id, ops::RefByTrainerIdOp, + ops::RefByTrainerIdOpMaker); +REGISTER_OP_CPU_KERNEL( + ref_by_trainer_id, + ops::RefByTrainerIdKernel, + ops::RefByTrainerIdKernel, + ops::RefByTrainerIdKernel, + ops::RefByTrainerIdKernel); diff --git a/paddle/fluid/operators/ref_by_trainer_id_op.cu.cc b/paddle/fluid/operators/ref_by_trainer_id_op.cu.cc new file mode 100644 index 0000000000000000000000000000000000000000..b98e2b5c9c7341f2a424fb4b32ff1e8bc45a056c --- /dev/null +++ b/paddle/fluid/operators/ref_by_trainer_id_op.cu.cc @@ -0,0 +1,26 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/ref_by_trainer_id_op.h" + +REGISTER_OP_CUDA_KERNEL( + ref_by_trainer_id, + paddle::operators::RefByTrainerIdKernel, + paddle::operators::RefByTrainerIdKernel, + paddle::operators::RefByTrainerIdKernel, + paddle::operators::RefByTrainerIdKernel); diff --git a/paddle/fluid/operators/ref_by_trainer_id_op.h b/paddle/fluid/operators/ref_by_trainer_id_op.h new file mode 100644 index 0000000000000000000000000000000000000000..2ce577544ae2437b9297da2190fd09b435d5173c --- /dev/null +++ b/paddle/fluid/operators/ref_by_trainer_id_op.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { +template +class RefByTrainerIdKernel : public framework::OpKernel { + public: + virtual void Compute(const framework::ExecutionContext& context) const { + auto* out = context.Output("Out"); + auto in_list = context.MultiInput("X"); + auto* trainer_id_t = context.Input("TrainerId"); + int64_t trainer_id = 0; + auto* trainer_id_data = trainer_id_t->data(); + if (platform::is_gpu_place(context.GetPlace())) { +#ifdef PADDLE_WITH_CUDA + auto stream = context.cuda_device_context().stream(); + memory::Copy<>(platform::CPUPlace(), &trainer_id, + boost::get(context.GetPlace()), + trainer_id_data, sizeof(int64_t), stream); +#endif + } else { + trainer_id = *trainer_id_data; + } + PADDLE_ENFORCE_LT(trainer_id, in_list.size()); + out->mutable_data(context.GetPlace()); + out->ShareDataWith(*(in_list[trainer_id])); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/rmsprop_op.h b/paddle/fluid/operators/rmsprop_op.h index 797cd45fdcdbd5c3567d1676f37e148304ee6e2d..389c84d2464090ff9bd9e8b471cd0103c86a347a 100644 --- a/paddle/fluid/operators/rmsprop_op.h +++ b/paddle/fluid/operators/rmsprop_op.h @@ -179,8 +179,8 @@ class RmspropOpKernel : public framework::OpKernel { auto &mg_tensor = *ctx.Input("MeanGrad"); auto mg = EigenVector::Flatten(mg_tensor); auto *mean_grad_out = ctx.Output("MeanGradOut"); - PADDLE_ENFORCE(&mg_tensor, mean_grad_out, - "MeanGrad and MeanGradOut must be the same Tensor"); + PADDLE_ENFORCE_EQ(&mg_tensor, mean_grad_out, + "MeanGrad and MeanGradOut must be the same Tensor"); auto mg_out = EigenVector::Flatten(*mean_grad_out); mg_out.device(place) = rho * mg + (1 - rho) * g; @@ -198,8 +198,8 @@ class RmspropOpKernel : public framework::OpKernel { if (centered) { auto &mg_tensor = *ctx.Input("MeanGrad"); auto *mean_grad_out = ctx.Output("MeanGradOut"); - PADDLE_ENFORCE(&mg_tensor, mean_grad_out, - "MeanGrad and MeanGradOut must be the same Tensor"); + PADDLE_ENFORCE_EQ(&mg_tensor, mean_grad_out, + "MeanGrad and MeanGradOut must be the same Tensor"); for_range(CenteredRmspropFunctor>( param_out->mutable_data(ctx.GetPlace()), mean_square_out->mutable_data(ctx.GetPlace()), @@ -243,8 +243,8 @@ class RmspropOpKernel : public framework::OpKernel { if (centered) { 
auto &mg_tensor = *ctx.Input("MeanGrad"); auto *mean_grad_out = ctx.Output("MeanGradOut"); - PADDLE_ENFORCE(&mg_tensor, mean_grad_out, - "MeanGrad and MeanGradOut must be the same Tensor"); + PADDLE_ENFORCE_EQ(&mg_tensor, mean_grad_out, + "MeanGrad and MeanGradOut must be the same Tensor"); for_range(CenteredRmspropFunctor>( param_out->mutable_data(ctx.GetPlace()), mean_square_out->mutable_data(ctx.GetPlace()), diff --git a/paddle/fluid/operators/scale_op.cu b/paddle/fluid/operators/scale_op.cu index 04c802da12958a53626f533833c2709110531136..349f39360b8e3100a7f844d3e2d3768053c37c58 100644 --- a/paddle/fluid/operators/scale_op.cu +++ b/paddle/fluid/operators/scale_op.cu @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/scale_op.h" +#include "paddle/fluid/platform/float16.h" +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( scale, @@ -20,4 +22,6 @@ REGISTER_OP_CUDA_KERNEL( paddle::operators::ScaleKernel, paddle::operators::ScaleKernel, paddle::operators::ScaleKernel); + int64_t>, + paddle::operators::ScaleKernel); diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/send_barrier_op.cc index 40404295266899c6ac2f7b1e08fdf7db40958794..8ca2877d8adad643089587fcee0917affa537f7d 100644 --- a/paddle/fluid/operators/send_barrier_op.cc +++ b/paddle/fluid/operators/send_barrier_op.cc @@ -39,7 +39,8 @@ class SendBarrierOp : public framework::OperatorBase { std::vector eps = Attr>("endpoints"); distributed::RPCClient* rpc_client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance( + Attr("trainer_id")); VLOG(3) << "SendBarrierOp sync"; @@ -67,6 +68,7 @@ This operator will send a send barrier signal to list_and_serv op, so that the Parameter Server would knew all variables have been sent. 
)DOC"); + AddAttr("trainer_id", "trainer id from 0 ~ worker_num.").SetDefault(0); AddAttr>("endpoints", "(string vector, default 127.0.0.1:6164)" "Server endpoints to send variables to.") diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index 48322ac7fd54a2e4cc3405a2c4dcddfc273f5a66..be1dc4bf14c3394963822b065ca088afbfacd858 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -44,7 +44,8 @@ class SendOp : public framework::OperatorBase { auto& ctx = *pool.Get(place); distributed::RPCClient* rpc_client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance( + Attr("trainer_id")); std::vector rets; for (size_t i = 0; i < ins.size(); i++) { @@ -79,6 +80,7 @@ This operator will send variables to listen_and_serve op at the parameter server "(int, default 0)" "sync send or async send.") .SetDefault(0); + AddAttr("trainer_id", "trainer id from 0 ~ worker_num.").SetDefault(0); AddAttr>("epmap", "(string vector, default 127.0.0.1:6164)" "Server endpoints in the order of input " diff --git a/paddle/fluid/operators/sign_op.cc b/paddle/fluid/operators/sign_op.cc index f3985dcc027f974e0213a73ea9a21e268d77615f..6837856a6da804e27af2cd6c83052c04f17140d8 100644 --- a/paddle/fluid/operators/sign_op.cc +++ b/paddle/fluid/operators/sign_op.cc @@ -67,4 +67,5 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(sign, ops::SignOp, ops::SignOpMaker, ops::SignGradMaker); REGISTER_OP_CPU_KERNEL( - sign, ops::SignKernel); + sign, ops::SignKernel, + ops::SignKernel); diff --git a/paddle/fluid/operators/sign_op.cu b/paddle/fluid/operators/sign_op.cu index e0d7a87e6485a74dd1cdee1a05abc42eef460990..817e0fbbd511462f161633242d28e63062676eb9 100644 --- a/paddle/fluid/operators/sign_op.cu +++ b/paddle/fluid/operators/sign_op.cu @@ -13,7 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/sign_op.h" +#include "paddle/fluid/platform/float16.h" REGISTER_OP_CUDA_KERNEL( sign, - paddle::operators::SignKernel); + paddle::operators::SignKernel, + paddle::operators::SignKernel, + paddle::operators::SignKernel); diff --git a/paddle/fluid/operators/softmax_cudnn_op.cu.cc b/paddle/fluid/operators/softmax_cudnn_op.cu.cc index f6e241af0634650f4a32be6a4547617f8ec3ee60..ad3e5543f10ae05865565110ba2231c897c205b8 100644 --- a/paddle/fluid/operators/softmax_cudnn_op.cu.cc +++ b/paddle/fluid/operators/softmax_cudnn_op.cu.cc @@ -80,4 +80,5 @@ REGISTER_OP_KERNEL(softmax, CUDNN, plat::CUDAPlace, ops::SoftmaxCUDNNKernel); REGISTER_OP_KERNEL(softmax_grad, CUDNN, plat::CUDAPlace, ops::SoftmaxGradCUDNNKernel, - ops::SoftmaxGradCUDNNKernel); + ops::SoftmaxGradCUDNNKernel, + ops::SoftmaxGradCUDNNKernel); diff --git a/paddle/fluid/operators/softmax_op.cu.cc b/paddle/fluid/operators/softmax_op.cu.cc index 5fb4f011d9b47cebc4a23bcce47eada825263343..19359b7eef5126d84f0707d39095a74ae4561186 100644 --- a/paddle/fluid/operators/softmax_op.cu.cc +++ b/paddle/fluid/operators/softmax_op.cu.cc @@ -23,4 +23,5 @@ REGISTER_OP_CUDA_KERNEL( ops::SoftmaxKernel); REGISTER_OP_CUDA_KERNEL( softmax_grad, ops::SoftmaxGradKernel, - ops::SoftmaxGradKernel); + ops::SoftmaxGradKernel, + ops::SoftmaxGradKernel); diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc index 1a9324ec862fc3dd7ce669c5fed94527cac22b8f..2900221485e6ec097796ac38936ce31f8382c86a 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc @@ -44,6 +44,12 @@ class SoftmaxWithCrossEntropyOpMaker "(bool, default: false), A flag to indicate whether to interpretate " "the given labels as soft labels.") .SetDefault(false); + AddAttr( + "numeric_stable_mode", + "(bool, default: false), A flag to indicate whether to use more " + "numerically stable algorithm. This flag is only valid when " + "soft_label is false and GPU is used.") + .SetDefault(false); AddAttr( "ignore_index", "(int, default -100), Specifies a target value that is ignored and" diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu index a07c17348ebb3f768d1c8be65c2d31e3c130bd23..6d48796191dd13a45f0c7267bfaf05489f528a9d 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu @@ -17,6 +17,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" +#include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { @@ -117,8 +118,8 @@ using BlockReduceTempStorage = typename BlockReduce::TempStorage; // Make sure that BlockDim <= feature_size // This kernel is used to calculate the max element of each row template -__global__ void RowReductionForMax(const T* logits_data, T* max_data, - int feature_size) { +static __global__ void RowReductionForMax(const T* logits_data, T* max_data, + int feature_size) { __shared__ BlockReduceTempStorage temp_storage; auto beg_idx = feature_size * blockIdx.x + threadIdx.x; @@ -141,9 +142,10 @@ __global__ void RowReductionForMax(const T* logits_data, T* max_data, } // Make sure that BlockDim <= feature_size -template -__global__ void RowReductionForDiffMaxSum(const T* logits_data, T* max_data, - T* softmax, int feature_size) { +template +static __global__ void RowReductionForDiffMaxSum(const T* logits_data, + T* max_data, T* softmax, + int feature_size) { __shared__ BlockReduceTempStorage temp_storage; auto beg_idx = feature_size * blockIdx.x + threadIdx.x; @@ -153,24 +155,34 @@ __global__ void RowReductionForDiffMaxSum(const T* logits_data, T* max_data, softmax[beg_idx] = logits_data[beg_idx] - block_max; T diff_max_sum = real_exp(softmax[beg_idx]); - beg_idx += BlockDim; - while (beg_idx < end_idx) { - softmax[beg_idx] = logits_data[beg_idx] - block_max; - diff_max_sum += real_exp(softmax[beg_idx]); - beg_idx += BlockDim; + auto idx = beg_idx + BlockDim; + while (idx < end_idx) { + softmax[idx] = logits_data[idx] - block_max; + diff_max_sum += real_exp(softmax[idx]); + idx += BlockDim; } diff_max_sum = BlockReduce(temp_storage).Reduce(diff_max_sum, cub::Sum()); if (threadIdx.x == 0) max_data[blockIdx.x] = real_log(diff_max_sum); + + if (!CalculateLogSoftmax) return; + __syncthreads(); + diff_max_sum = max_data[blockIdx.x]; + softmax[beg_idx] -= diff_max_sum; + beg_idx += BlockDim; + while (beg_idx < end_idx) { + softmax[beg_idx] -= diff_max_sum; + beg_idx += BlockDim; + } + if (threadIdx.x == 0) max_data[blockIdx.x] = 0; } // Make sure that BlockDim <= feature_size template -__global__ void RowReductionForSoftmaxAndCrossEntropy(const T* logits_data, - const T* labels_data, - T* loss_data, T* softmax, - int feature_size) { +static __global__ void RowReductionForSoftmaxAndCrossEntropy( + const T* logits_data, const T* labels_data, T* loss_data, T* softmax, + int feature_size) { __shared__ BlockReduceTempStorage temp_storage; auto beg_idx = feature_size * blockIdx.x + threadIdx.x; @@ -194,11 +206,134 @@ __global__ void RowReductionForSoftmaxAndCrossEntropy(const T* logits_data, } template -__global__ void SetSoftmaxToOneWhenFeatureSizeIsOne(T* out, int batch_size) { +struct HardLabelSoftmaxWithCrossEntropyFunctor { + public: + HardLabelSoftmaxWithCrossEntropyFunctor(const T* logits, + const int64_t* labels, T* loss, + T* log_softmax, int feature_size) + : logits_(logits), + labels_(labels), + loss_(loss), + log_softmax_(log_softmax), + feature_size_(feature_size) {} + + __device__ void operator()(int idx) const { + auto row_idx = idx / feature_size_; + auto col_idx = idx % feature_size_; + if (col_idx != labels_[row_idx]) { + log_softmax_[idx] = real_exp(log_softmax_[idx]); + } else { + auto softmax = log_softmax_[idx]; + log_softmax_[idx] = real_exp(softmax); + loss_[row_idx] = -softmax; + } + } + + private: + const T* logits_; + const int64_t* labels_; 
+ T* loss_; + T* log_softmax_; + int feature_size_; +}; + +template +struct HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx { + public: + HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx(const T* logits, + const int64_t* labels, + T* loss, T* log_softmax, + int feature_size, + int ignore_idx) + : logits_(logits), + labels_(labels), + loss_(loss), + log_softmax_(log_softmax), + feature_size_(feature_size), + ignore_idx_(ignore_idx) {} + + __device__ void operator()(int idx) const { + auto row_idx = idx / feature_size_; + auto col_idx = idx % feature_size_; + if (col_idx != labels_[row_idx] || col_idx == ignore_idx_) { + log_softmax_[idx] = real_exp(log_softmax_[idx]); + } else { + auto softmax = log_softmax_[idx]; + log_softmax_[idx] = real_exp(softmax); + loss_[row_idx] = -softmax; + } + } + + private: + const T* logits_; + const int64_t* labels_; + T* loss_; + T* log_softmax_; + int feature_size_; + int ignore_idx_; +}; + +template +static __global__ void SetSoftmaxToOneWhenFeatureSizeIsOne(T* out, + int batch_size) { auto idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < batch_size) out[idx] = static_cast(1); } +template +static void HardLabelSoftmaxWithCrossEntropy( + const platform::CUDADeviceContext& ctx, const T* logits_data, + const int64_t* labels_data, T* loss_data, T* softmax_data, int batch_size, + int feature_size, int ignore_idx) { + constexpr int kMaxBlockDim = 512; + int block_dim = feature_size >= kMaxBlockDim + ? kMaxBlockDim + : (1 << static_cast(std::log2(feature_size))); + auto stream = ctx.stream(); + +#define CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ + case BlockDim: { \ + RowReductionForMax<<>>( \ + logits_data, loss_data, feature_size); \ + RowReductionForDiffMaxSum<<>>( \ + logits_data, loss_data, softmax_data, feature_size); \ + platform::ForRange for_range( \ + ctx, batch_size* feature_size); \ + if (ignore_idx >= 0 && ignore_idx < feature_size) { \ + for_range(HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx( \ + logits_data, labels_data, loss_data, softmax_data, feature_size, \ + ignore_idx)); \ + } else { \ + for_range(HardLabelSoftmaxWithCrossEntropyFunctor( \ + logits_data, labels_data, loss_data, softmax_data, feature_size)); \ + } \ + } break + + switch (block_dim) { + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(512); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(256); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(128); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(64); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(32); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(16); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(8); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4); + CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2); + case 1: + SetSoftmaxToOneWhenFeatureSizeIsOne<<<(batch_size + kMaxBlockDim - 1) / + kMaxBlockDim, + kMaxBlockDim, 0, stream>>>( + softmax_data, batch_size); + cudaMemsetAsync(loss_data, 0, batch_size * sizeof(T), stream); + break; + default: + PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op"); + break; + } +#undef CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL +} + template static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data, const T* labels_data, @@ -237,7 +372,7 @@ static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data, kMaxBlockDim, kMaxBlockDim, 0, stream>>>( softmax_data, batch_size); - cudaMemsetAsync(loss_data, 0, batch_size, 
stream); + cudaMemsetAsync(loss_data, 0, batch_size * sizeof(T), stream); break; default: PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op"); @@ -272,11 +407,21 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { logits_data, labels_data, softmax_data, loss_data, batch_size, feature_size, context.cuda_device_context().stream()); } else { - math::SoftmaxCUDNNFunctor()(context.cuda_device_context(), logits, - softmax); - math::CrossEntropyFunctor()( - context.cuda_device_context(), loss, softmax, labels, false, - ignore_index); + if (!context.Attr("numeric_stable_mode")) { + math::SoftmaxCUDNNFunctor()(context.cuda_device_context(), logits, + softmax); + math::CrossEntropyFunctor()( + context.cuda_device_context(), loss, softmax, labels, false, + ignore_index); + } else { + int batch_size = logits->dims()[0]; + int feature_size = logits->dims()[1]; + auto* logits_data = logits->data(); + auto* labels_data = labels->data(); + HardLabelSoftmaxWithCrossEntropy( + context.cuda_device_context(), logits_data, labels_data, loss_data, + softmax_data, batch_size, feature_size, ignore_index); + } } } }; diff --git a/paddle/fluid/operators/spp_op.h b/paddle/fluid/operators/spp_op.h index 08cb7849d20443862b66ea6096c095b294c7242c..35d9737ee01fe1505cbe30e8ed735e6b92cb8df2 100644 --- a/paddle/fluid/operators/spp_op.h +++ b/paddle/fluid/operators/spp_op.h @@ -56,12 +56,14 @@ class SppKernel : public framework::OpKernel { math::Pool2dFunctor, T> pool_forward; math::MaxPool max_process; pool_forward(context.template device_context(), *in_x, - kernel_size, strides, paddings, max_process, &out_level); + kernel_size, strides, paddings, max_process, true, + &out_level); } else if (pooling_type == "avg") { math::Pool2dFunctor, T> pool_forward; math::AvgPool avg_process; pool_forward(context.template device_context(), *in_x, - kernel_size, strides, paddings, avg_process, &out_level); + kernel_size, strides, paddings, avg_process, true, + &out_level); } // flatten pooling output shape int output_flatten_w = in_x->dims()[1] * bins * bins; @@ -154,7 +156,7 @@ class SppGradKernel : public framework::OpKernel { math::AvgPoolGrad avg_process; pool_backward(context.template device_context(), *in_x, *&out_level, *&outgrad_level, kernel_size, strides, - paddings, avg_process, in_x_grad); + paddings, avg_process, true, in_x_grad); } } } diff --git a/paddle/fluid/operators/sum_op.cu b/paddle/fluid/operators/sum_op.cu index 89bcd1bbc86dc29cb7b98cbef3057a8f98c74555..db4c2d6c115f04b436db00854ca4b02fea09866b 100644 --- a/paddle/fluid/operators/sum_op.cu +++ b/paddle/fluid/operators/sum_op.cu @@ -11,10 +11,13 @@ limitations under the License. 
*/ #define EIGEN_USE_GPU #include "paddle/fluid/operators/sum_op.h" +#include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; +namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( sum, ops::SumKernel, ops::SumKernel, ops::SumKernel, - ops::SumKernel); + ops::SumKernel, + ops::SumKernel); diff --git a/paddle/fluid/operators/sum_op.h b/paddle/fluid/operators/sum_op.h index f6e12dfc76c6ce73f10e707387f6a9cedacde3c8..19b2c68c823adbed82319f7b04992baedd5d41f9 100644 --- a/paddle/fluid/operators/sum_op.h +++ b/paddle/fluid/operators/sum_op.h @@ -61,7 +61,7 @@ class SumKernel : public framework::OpKernel { if (start != 2) { math::SetConstant constant_functor; constant_functor(context.template device_context(), - out, 0.0); + out, static_cast(0)); } } diff --git a/paddle/fluid/operators/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt_engine_op.h index d4ba0f9c33c91811647f9d19a332f139c16b0eb2..673f86da76ee0712b4d941f5b33594f89926b973 100644 --- a/paddle/fluid/operators/tensorrt_engine_op.h +++ b/paddle/fluid/operators/tensorrt_engine_op.h @@ -223,7 +223,9 @@ class TensorRTEngineKernel : public framework::OpKernel { // Add outputs for (auto& output : output_maps) { - engine->DeclareOutput(output); + if (!engine->HasDeclared(output)) { + engine->DeclareOutput(output); + } } engine->FreezeNetwork(); diff --git a/paddle/fluid/operators/test_send_nccl_id.cc b/paddle/fluid/operators/test_send_nccl_id.cc index e2b7b6b8e447381229e4ad594b7974bc0aa159d5..b5426e17aac19dc07ee56545fac8472d9ef0d93c 100644 --- a/paddle/fluid/operators/test_send_nccl_id.cc +++ b/paddle/fluid/operators/test_send_nccl_id.cc @@ -92,7 +92,7 @@ TEST(SendNcclId, RPCServer) { std::string ep = string::Sprintf("127.0.0.1:%d", port); distributed::RPCClient* client = - distributed::RPCClient::GetInstance(); + distributed::RPCClient::GetInstance(0); LOG(INFO) << "connect to server" << ep; client->AsyncSendVar(ep, dev_ctx, scope, NCCL_ID_VARNAME); diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index bb8b14bb9fa41942c3aa653ca224c0842fbf9a00..07bb02be1962f758e50cab1f27de43e89f3953c3 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -76,8 +76,9 @@ enum class DataLayout { // Not use enum class PoolingMode { kMaximum, - kAverage, kMaximumDeterministic, + kAverageExclusive, + kAverageInclusive, }; #if CUDNN_VERSION < 6000 @@ -91,8 +92,10 @@ inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) { switch (mode) { case PoolingMode::kMaximumDeterministic: return CUDNN_POOLING_MAX; - case PoolingMode::kAverage: + case PoolingMode::kAverageExclusive: return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; + case PoolingMode::kAverageInclusive: + return CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; case PoolingMode::kMaximum: return CUDNN_POOLING_MAX; default: @@ -105,8 +108,10 @@ inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) { switch (mode) { case PoolingMode::kMaximumDeterministic: return CUDNN_POOLING_MAX_DETERMINISTIC; - case PoolingMode::kAverage: + case PoolingMode::kAverageExclusive: return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; + case PoolingMode::kAverageInclusive: + return CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING; case PoolingMode::kMaximum: return CUDNN_POOLING_MAX; default: @@ -341,6 +346,28 @@ class ScopedPoolingDescriptor { DISABLE_COPY_AND_ASSIGN(ScopedPoolingDescriptor); }; +class ScopedSpatialTransformerDescriptor { + public: + ScopedSpatialTransformerDescriptor() { + 
PADDLE_ENFORCE(dynload::cudnnCreateSpatialTransformerDescriptor(&desc_)); + } + ~ScopedSpatialTransformerDescriptor() { + PADDLE_ENFORCE(dynload::cudnnDestroySpatialTransformerDescriptor(desc_)); + } + + template + inline cudnnSpatialTransformerDescriptor_t descriptor(const int nbDims, + const int dimA[]) { + PADDLE_ENFORCE(dynload::cudnnSetSpatialTransformerNdDescriptor( + desc_, CUDNN_SAMPLER_BILINEAR, CudnnDataType::type, nbDims, dimA)); + return desc_; + } + + private: + cudnnSpatialTransformerDescriptor_t desc_; + DISABLE_COPY_AND_ASSIGN(ScopedSpatialTransformerDescriptor); +}; + inline bool CanCUDNNBeUsed(const framework::ExecutionContext& ctx) { bool use_cudnn = ctx.Attr("use_cudnn"); use_cudnn &= paddle::platform::is_gpu_place(ctx.GetPlace()); diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 924810bd61841139bc1849a000aaa57b07b71f0c..ff49a1d57fd977a6d6b4502b44e48aad34cde872 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -153,55 +153,31 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface { mutable unsigned int* semaphore_; }; -class CudnnHolder { - public: - CudnnHolder(const cudaStream_t* stream, const CUDAPlace& place) - : workspace_(nullptr), workspace_len_(0), stream_(stream), place_(place) { - PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_)); - PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, *stream_)); - } - - cudnnHandle_t cudnn_handle() const { return cudnn_handle_; } +CudnnHolder::CudnnHolder(const cudaStream_t* stream, const CUDAPlace& place) + : workspace_(nullptr), workspace_len_(0), stream_(stream), place_(place) { + PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_)); + PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, *stream_)); +} - void RunFunc(const std::function& cudnn_func, - size_t required_workspace_len) { - std::lock_guard lock(mtx_); - if (required_workspace_len > workspace_len_) { - ReallocateWorkspace(required_workspace_len); - } - cudnn_func(workspace_); +CudnnHolder::~CudnnHolder() { + PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_)); + if (workspace_ != nullptr) { + paddle::memory::Free(place_, workspace_); } +} - ~CudnnHolder() { - PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_)); - if (workspace_ != nullptr) { - paddle::memory::Free(place_, workspace_); - } +void CudnnHolder::ReallocateWorkspace(size_t required_workspace_len) { + if (required_workspace_len <= workspace_len_) { + return; } - - private: - void ReallocateWorkspace(size_t required_workspace_len) { - if (required_workspace_len <= workspace_len_) { - return; - } - if (workspace_ != nullptr) { - // Maybe someone is using the current workspace - PADDLE_ENFORCE(cudaStreamSynchronize(*stream_)); - paddle::memory::Free(place_, workspace_); - } - workspace_ = paddle::memory::Alloc(place_, required_workspace_len); - workspace_len_ = required_workspace_len; + if (workspace_ != nullptr) { + // Maybe someone is using the current workspace + PADDLE_ENFORCE(cudaStreamSynchronize(*stream_)); + paddle::memory::Free(place_, workspace_); } - - cudnnHandle_t cudnn_handle_; - void* workspace_; - size_t workspace_len_; - - const cudaStream_t* stream_; // not owned; - const CUDAPlace place_; - - std::mutex mtx_; -}; + workspace_ = paddle::memory::Alloc(place_, required_workspace_len); + workspace_len_ = required_workspace_len; +} CUDADeviceContext::CUDADeviceContext(CUDAPlace place) : place_(place), cudnn_holder_(nullptr) { @@ -222,12 +198,12 @@ 
CUDADeviceContext::CUDADeviceContext(CUDAPlace place) driver_version_ = GetCUDADriverVersion(place_.device); runtime_version_ = GetCUDARuntimeVersion(place_.device); - LOG(INFO) << "device: " << place_.device - << ", CUDA Capability: " << compute_capability_ - << ", Driver Version: " << driver_version_ / 1000 << "." - << (driver_version_ % 100) / 10 - << ", Runtime Version: " << runtime_version_ / 1000 << "." - << (runtime_version_ % 100) / 10; + LOG_FIRST_N(WARNING, 1) << "Please NOTE: device: " << place_.device + << ", CUDA Capability: " << compute_capability_ + << ", Driver Version: " << driver_version_ / 1000 + << "." << (driver_version_ % 100) / 10 + << ", Runtime Version: " << runtime_version_ / 1000 + << "." << (runtime_version_ % 100) / 10; callback_manager_.reset(new StreamCallbackManager(stream_)); } @@ -269,9 +245,8 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_holder_->cudnn_handle(); } -void CUDADeviceContext::RunCudnnFuncWithWorkspace( - const std::function& cudnn_func, size_t workspace_len) const { - cudnn_holder_->RunFunc(cudnn_func, workspace_len); +CudnnWorkspaceHandle CUDADeviceContext::cudnn_workspace_handle() const { + return CudnnWorkspaceHandle(cudnn_holder_.get()); } cudaStream_t CUDADeviceContext::stream() const { return stream_; } diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index 0240b9380f3213b2a030061007e04abe1d73c6e3..df248f9bb15591d5015ad01278797ec7e31ef9d1 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -73,7 +73,60 @@ struct DefaultDeviceContextType { #ifdef PADDLE_WITH_CUDA class EigenCudaStreamDevice; -class CudnnHolder; +class CudnnHolder { + public: + CudnnHolder(const cudaStream_t* stream, const CUDAPlace& place); + ~CudnnHolder(); + cudnnHandle_t cudnn_handle() const { return cudnn_handle_; } + + private: + friend class CudnnWorkspaceHandle; + void ReallocateWorkspace(size_t required_workspace_len); + + template + void RunFuncImpl(Callback&& cudnn_func, size_t required_workspace_len) { + if (required_workspace_len > workspace_len_) { + ReallocateWorkspace(required_workspace_len); + } + cudnn_func(workspace_); + } + + std::mutex& Mutex() { return mtx_; } + + cudnnHandle_t cudnn_handle_; + void* workspace_; + size_t workspace_len_; + + const cudaStream_t* stream_; // not owned; + const CUDAPlace place_; + + std::mutex mtx_; +}; + +class CudnnWorkspaceHandle { + public: + /*! \brief The lock would not be acquired when constructor calls. + * The lock would be acquired when RunFunc() is called first time. */ + inline explicit CudnnWorkspaceHandle(CudnnHolder* holder) : holder_(holder) {} + + /*! \brief Thread which call RunFunc() would acquire the lock first + * before invoking cudnn functions. */ + template + inline void RunFunc(Callback&& cudnn_func, size_t required_workspace_len) { + if (!guard_) { + guard_.reset(new std::lock_guard(holder_->Mutex())); + } + holder_->RunFuncImpl(std::forward(cudnn_func), + required_workspace_len); + } + + CudnnWorkspaceHandle(CudnnWorkspaceHandle&&) = default; + CudnnWorkspaceHandle& operator=(CudnnWorkspaceHandle&&) = delete; + + private: + CudnnHolder* holder_; // not own + std::unique_ptr> guard_; +}; class CUDADeviceContext : public DeviceContext { public: @@ -101,10 +154,14 @@ class CUDADeviceContext : public DeviceContext { /*! \brief Return cudnn handle in the device context. */ cudnnHandle_t cudnn_handle() const; - /*! 
\brief Run a cudnn function with the workspace provided by - * CUDADeviceContext */ - void RunCudnnFuncWithWorkspace(const std::function& cudnn_func, - size_t workspace_len) const; + /*! \brief Return a cudnn workspace handle to call multiple cudnn + * functions without interrupting by other threads. + * Once the first cudnn function is called by the handle, a lock + * would be acquired to prevent other threads from accessing the + * workspace. Once the handle is destructed, the lock would be released. + * CudnnWorkspaceHandle is an RAII object to implement thread-safe + * sequential cudnn function calls. */ + CudnnWorkspaceHandle cudnn_workspace_handle() const; /*! \brief Return cuda stream in the device context. */ cudaStream_t stream() const; diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index e6353f67ef118072a2d8e49111e8ecc486589998..d3d754b6f58d25a9dfacafaf55d50b353a71ee6d 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -65,44 +65,51 @@ extern void EnforceCUDNNLoaded(const char* fn_name); * include all needed cudnn functions in HPPL * different cudnn version has different interfaces **/ -#define CUDNN_DNN_ROUTINE_EACH(__macro) \ - __macro(cudnnSetTensor4dDescriptor); \ - __macro(cudnnSetTensor4dDescriptorEx); \ - __macro(cudnnSetTensorNdDescriptor); \ - __macro(cudnnGetTensorNdDescriptor); \ - __macro(cudnnGetConvolutionNdForwardOutputDim); \ - __macro(cudnnGetConvolutionForwardAlgorithm); \ - __macro(cudnnCreateTensorDescriptor); \ - __macro(cudnnDestroyTensorDescriptor); \ - __macro(cudnnCreateFilterDescriptor); \ - __macro(cudnnSetFilter4dDescriptor); \ - __macro(cudnnSetFilterNdDescriptor); \ - __macro(cudnnGetFilterNdDescriptor); \ - __macro(cudnnSetPooling2dDescriptor); \ - __macro(cudnnSetPoolingNdDescriptor); \ - __macro(cudnnGetPoolingNdDescriptor); \ - __macro(cudnnDestroyFilterDescriptor); \ - __macro(cudnnCreateConvolutionDescriptor); \ - __macro(cudnnCreatePoolingDescriptor); \ - __macro(cudnnDestroyPoolingDescriptor); \ - __macro(cudnnSetConvolution2dDescriptor); \ - __macro(cudnnDestroyConvolutionDescriptor); \ - __macro(cudnnSetConvolutionNdDescriptor); \ - __macro(cudnnGetConvolutionNdDescriptor); \ - __macro(cudnnDeriveBNTensorDescriptor); \ - __macro(cudnnCreate); \ - __macro(cudnnDestroy); \ - __macro(cudnnSetStream); \ - __macro(cudnnActivationForward); \ - __macro(cudnnConvolutionForward); \ - __macro(cudnnConvolutionBackwardBias); \ - __macro(cudnnGetConvolutionForwardWorkspaceSize); \ - __macro(cudnnTransformTensor); \ - __macro(cudnnPoolingForward); \ - __macro(cudnnPoolingBackward); \ - __macro(cudnnSoftmaxBackward); \ - __macro(cudnnSoftmaxForward); \ - __macro(cudnnGetVersion); \ +#define CUDNN_DNN_ROUTINE_EACH(__macro) \ + __macro(cudnnSetTensor4dDescriptor); \ + __macro(cudnnSetTensor4dDescriptorEx); \ + __macro(cudnnSetTensorNdDescriptor); \ + __macro(cudnnGetTensorNdDescriptor); \ + __macro(cudnnGetConvolutionNdForwardOutputDim); \ + __macro(cudnnGetConvolutionForwardAlgorithm); \ + __macro(cudnnCreateTensorDescriptor); \ + __macro(cudnnDestroyTensorDescriptor); \ + __macro(cudnnCreateFilterDescriptor); \ + __macro(cudnnSetFilter4dDescriptor); \ + __macro(cudnnSetFilterNdDescriptor); \ + __macro(cudnnGetFilterNdDescriptor); \ + __macro(cudnnSetPooling2dDescriptor); \ + __macro(cudnnSetPoolingNdDescriptor); \ + __macro(cudnnGetPoolingNdDescriptor); \ + __macro(cudnnDestroyFilterDescriptor); \ + __macro(cudnnCreateConvolutionDescriptor); \ + 
__macro(cudnnCreatePoolingDescriptor); \ + __macro(cudnnDestroyPoolingDescriptor); \ + __macro(cudnnSetConvolution2dDescriptor); \ + __macro(cudnnDestroyConvolutionDescriptor); \ + __macro(cudnnSetConvolutionNdDescriptor); \ + __macro(cudnnGetConvolutionNdDescriptor); \ + __macro(cudnnDeriveBNTensorDescriptor); \ + __macro(cudnnCreateSpatialTransformerDescriptor); \ + __macro(cudnnSetSpatialTransformerNdDescriptor); \ + __macro(cudnnDestroySpatialTransformerDescriptor); \ + __macro(cudnnSpatialTfGridGeneratorForward); \ + __macro(cudnnSpatialTfGridGeneratorBackward); \ + __macro(cudnnSpatialTfSamplerForward); \ + __macro(cudnnSpatialTfSamplerBackward); \ + __macro(cudnnCreate); \ + __macro(cudnnDestroy); \ + __macro(cudnnSetStream); \ + __macro(cudnnActivationForward); \ + __macro(cudnnConvolutionForward); \ + __macro(cudnnConvolutionBackwardBias); \ + __macro(cudnnGetConvolutionForwardWorkspaceSize); \ + __macro(cudnnTransformTensor); \ + __macro(cudnnPoolingForward); \ + __macro(cudnnPoolingBackward); \ + __macro(cudnnSoftmaxBackward); \ + __macro(cudnnSoftmaxForward); \ + __macro(cudnnGetVersion); \ __macro(cudnnGetErrorString); CUDNN_DNN_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_CUDNN_WRAP) diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc index 1cc5a3d49f6c440f92b0e88689f640a1185af07e..f61abfc43d4acb89d6cd522106e7093296a036b6 100644 --- a/paddle/fluid/platform/init.cc +++ b/paddle/fluid/platform/init.cc @@ -116,21 +116,51 @@ void InitDevices(bool init_p2p, const std::vector devices) { platform::SetNumThreads(FLAGS_paddle_num_threads); #endif - if (platform::jit::MayIUse(platform::jit::avx512f)) { -#ifndef __AVX512F__ - LOG(WARNING) << "AVX512F is available, Please re-compile on local machine"; +#if !defined(_WIN32) && !defined(__APPLE__) && !defined(__OSX__) + if (platform::jit::MayIUse(platform::jit::avx)) { +#ifndef __AVX__ + LOG(WARNING) << "AVX is available, please re-compile on the local machine"; #endif } - if (platform::jit::MayIUse(platform::jit::avx2)) { -#ifndef __AVX2__ - LOG(WARNING) << "AVX2 is available, Please re-compile on local machine"; + +// Give guidance when the compile-time and runtime CPU instruction sets mismatch. +#define AVX_GUIDE(compiletime, runtime) \ + LOG(FATAL) \ + << "This version is compiled with the higher instruction set (" #compiletime \ + "); you may encounter an illegal instruction error running on" \ + " your local CPU machine. Please reinstall the " #runtime \ + " version or compile from source code." 
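For orientation, the AVX_GUIDE check defined above aborts whenever the binary was built for a stricter instruction set than the host CPU reports at runtime; the #ifdef blocks that follow apply it per instruction set. Below is a minimal, purely illustrative Python sketch of the same dispatch. `ISA_ORDER` and `check_isa` are hypothetical stand-ins for the `__AVX512F__`/`__AVX2__`/`__AVX__` macros and `platform::jit::MayIUse`; they are not part of the patch.

.. code-block:: python

    # Hypothetical model of the AVX_GUIDE dispatch, weakest to strictest ISA.
    ISA_ORDER = ['NonAVX', 'AVX', 'AVX2', 'AVX512']

    def check_isa(compiled_isa, runtime_isas):
        # Highest ISA the local CPU supports (MayIUse() in the real code).
        best = 'NonAVX'
        for isa in runtime_isas:
            if ISA_ORDER.index(isa) > ISA_ORDER.index(best):
                best = isa
        # Compiled for something stricter than the CPU provides -> fatal.
        if ISA_ORDER.index(compiled_isa) > ISA_ORDER.index(best):
            raise RuntimeError('compiled with %s but this CPU only supports %s; '
                               'reinstall the %s build or compile from source'
                               % (compiled_isa, best, best))

    check_isa('AVX', ['AVX', 'AVX2'])  # passes
    # check_isa('AVX512', ['AVX2'])    # would raise, like AVX_GUIDE(AVX512, AVX2)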
+ +#ifdef __AVX512F__ + if (!platform::jit::MayIUse(platform::jit::avx512f)) { + if (platform::jit::MayIUse(platform::jit::avx2)) { + AVX_GUIDE(AVX512, AVX2); + } else if (platform::jit::MayIUse(platform::jit::avx)) { + AVX_GUIDE(AVX512, AVX); + } else { + AVX_GUIDE(AVX512, NonAVX); + } + } #endif + +#ifdef __AVX2__ + if (!platform::jit::MayIUse(platform::jit::avx2)) { + if (platform::jit::MayIUse(platform::jit::avx)) { + AVX_GUIDE(AVX2, AVX); + } else { + AVX_GUIDE(AVX2, NonAVX); + } } - if (platform::jit::MayIUse(platform::jit::avx)) { -#ifndef __AVX__ - LOG(WARNING) << "AVX is available, Please re-compile on local machine"; #endif + +#ifdef __AVX__ + if (!platform::jit::MayIUse(platform::jit::avx)) { + AVX_GUIDE(AVX, NonAVX); } +#endif +#undef AVX_GUIDE + +#endif } void InitGLOG(const std::string &prog_name) { diff --git a/paddle/fluid/platform/mkldnn_helper.h b/paddle/fluid/platform/mkldnn_helper.h index c0a2543ba5d8ff8f34cb6231c51cb5053a6a9481..814012e6c1fad414d10f5a64af283bed57e11fe3 100644 --- a/paddle/fluid/platform/mkldnn_helper.h +++ b/paddle/fluid/platform/mkldnn_helper.h @@ -187,6 +187,29 @@ class MKLDNNHandler { return mem_p; } + std::shared_ptr AcquireMemory( + const std::shared_ptr& user_memory_p, + const std::shared_ptr& target_memory_p, + const std::string& suffix, + std::vector& pipeline) { // NOLINT + auto local_key = key_ + suffix; + auto key_reorder_p = key_ + suffix + "reorder_p"; + + auto stored_reorder_p = std::static_pointer_cast( + dev_ctx_.GetBlob(key_reorder_p)); + + if (stored_reorder_p) { + pipeline.push_back(*stored_reorder_p); + } else { + auto reorder_p = + std::make_shared(*user_memory_p, *target_memory_p); + dev_ctx_.SetBlob(key_reorder_p, reorder_p); + pipeline.push_back(*reorder_p); + } + + return target_memory_p; + } + std::shared_ptr AcquireMemory( mkldnn::memory::primitive_desc& mpd, // NOLINT mkldnn::memory::primitive_desc& user_mpd, // NOLINT diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index da46a1abe12258b47b2fd4afb5f146daf15e026d..56bf9e31a35fdec5b7f04849068ff96ac9776c0e 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -226,7 +226,7 @@ RecordBlock::~RecordBlock() { void EnableProfiler(ProfilerState state) { PADDLE_ENFORCE(state != ProfilerState::kDisabled, - "Can't enbale profling, since the input state is ", + "Can't enable profiling, since the input state is ", "ProfilerState::kDisabled"); std::lock_guard l(profiler_mu); diff --git a/paddle/fluid/platform/stream_callback_manager.h b/paddle/fluid/platform/stream_callback_manager.h index 5f10137dcf85af1f5e1678ec68c8d9da460c5bdc..3cd1628a0b16d5974455a42b41f399461bc168b0 100644 --- a/paddle/fluid/platform/stream_callback_manager.h +++ b/paddle/fluid/platform/stream_callback_manager.h @@ -24,8 +24,6 @@ namespace paddle { namespace platform { -using StreamCallback = std::function; - class StreamCallbackManager; struct StreamCallbackContext { @@ -35,7 +33,7 @@ struct StreamCallbackContext { : manager_(manager), callback_(callback) {} const StreamCallbackManager *manager_; // do not own - StreamCallback callback_; + std::function callback_; }; class StreamCallbackManager { @@ -45,16 +43,18 @@ class StreamCallbackManager { template inline void AddCallback(Callback &&callback) const { - AddCallbackWithStreamAndErrorInfo( - [=](cudaStream_t, cudaError_t) { callback(); }); - } - - template - inline void AddCallbackWithStreamAndErrorInfo(Callback &&callback) const { - auto *stream_callback_context = new 
StreamCallbackContext(this, callback); - PADDLE_ENFORCE(cudaStreamAddCallback( - stream_, StreamCallbackManager::StreamCallbackFunc, - stream_callback_context, 0)); + auto *stream_callback_context = + new StreamCallbackContext(this, std::forward(callback)); + PADDLE_ENFORCE( +#if CUDA_VERSION >= 10000 + cudaLaunchHostFunc(stream_, StreamCallbackManager::StreamCallbackFunc, + stream_callback_context) +#else + cudaStreamAddCallback(stream_, + StreamCallbackManager::StreamCallbackFunc, + stream_callback_context, 0) +#endif + ); // NOLINT } void Wait() const { thread_pool_.reset(new ThreadPool(1)); } @@ -63,17 +63,21 @@ class StreamCallbackManager { const cudaStream_t stream_; mutable std::unique_ptr thread_pool_; - // cudaStreamCallback cannot call CUDA API inside, so we have to use - // thread_pool here +// cudaStreamCallback cannot call CUDA API inside, so we have to use +// thread_pool here +#if CUDA_VERSION >= 10000 + static void CUDART_CB StreamCallbackFunc(void *user_data) +#else static void CUDART_CB StreamCallbackFunc(cudaStream_t stream, - cudaError_t status, - void *user_data) { + cudaError_t status, void *user_data) +#endif + { auto *callback_context_ptr = reinterpret_cast(user_data); callback_context_ptr->manager_->thread_pool_->enqueue([=]() { std::unique_ptr callback_context( callback_context_ptr); - callback_context->callback_(stream, status); + callback_context->callback_(); }); } }; diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 9dbb2928d393b67773672a67885a80e08fa7dedb..6cba3395bf725532aa9b2946fe243e354176999f 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -761,7 +761,12 @@ All parameter, weight, gradient are variables in Paddle. will clean up the temp variables at the end of the current iteration. 2. In some NLP model, it may cause the GPU memory is insufficient, in this case, you should reduce `num_iteration_per_drop_scope`. - )DOC"); + )DOC") + .def_property("_dry_run", + [](const ExecutionStrategy &self) { return self.dry_run_; }, + [](ExecutionStrategy &self, bool dry_run) { + self.dry_run_ = dry_run; + }); exec_strategy.def_property( "use_experimental_executor", @@ -840,6 +845,24 @@ All parameter, weight, gradient are variables in Paddle. [](BuildStrategy &self, bool b) { self.enable_data_balance_ = b; }) // FIXME(chengudo): enable_data_balance seems not important + .def_property( + "enable_sequential_execution", + [](const BuildStrategy &self) { + return self.enable_sequential_execution_; + }, + [](BuildStrategy &self, bool b) { + self.enable_sequential_execution_ = b; + }, + R"DOC(The type is BOOL. If set True, the execution order of ops would be the same as what is in the program. Default False.)DOC") + .def_property( + "remove_unnecessary_lock", + [](const BuildStrategy &self) { + return self.remove_unnecessary_lock_; + }, + [](BuildStrategy &self, bool b) { + self.remove_unnecessary_lock_ = b; + }, + R"DOC(The type is BOOL. If set True, some locks in GPU ops would be released and ParallelExecutor would run faster. 
Default False.)DOC") .def_property( "fuse_elewise_add_act_ops", [](const BuildStrategy &self) { diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index a29562b0692684a52a2f022023ea57c3ca1ef712..d7676f89ab5e781f910f98d03e72d5f7c1023a9a 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -147,7 +147,6 @@ function cmake_gen() { -DWITH_SWIG_PY=${WITH_SWIG_PY:-ON} -DCUDNN_ROOT=/usr/ -DWITH_TESTING=${WITH_TESTING:-ON} - -DWITH_FAST_BUNDLE_TEST=ON -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} @@ -180,7 +179,6 @@ EOF -DWITH_PYTHON=${WITH_PYTHON:-ON} \ -DCUDNN_ROOT=/usr/ \ -DWITH_TESTING=${WITH_TESTING:-ON} \ - -DWITH_FAST_BUNDLE_TEST=ON \ -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake \ -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 70c1f9589967e952598a112233b180e2865a8e40..ced1da7ad988a03621839b73db8bef8d0648af37 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -125,7 +125,6 @@ def __bootstrap__(): if core.is_compiled_with_dist(): read_env_flags.append('rpc_deadline') - read_env_flags.append('rpc_server_profile_period') read_env_flags.append('rpc_server_profile_path') read_env_flags.append('enable_rpc_profiler') read_env_flags.append('rpc_send_thread_num') diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 604f3eacd75beff306915b224b30c369dd3a486f..8936d884dd9e1ebbe5f688c11430b64e51ad8bd5 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -65,7 +65,7 @@ def is_persistable(var): Examples: .. code-block:: python - param = fluid.default_main_program().global_block().var('fc.w') + param = fluid.default_main_program().global_block().var('fc.b') res = fluid.io.is_persistable(param) """ if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ @@ -625,8 +625,13 @@ def save_inference_model(dirname, main_program._distributed_lookup_table, main_program._endpoints) - if not os.path.isdir(dirname): + # when a pserver and a trainer running on the same machine, mkdir may conflict + try: os.makedirs(dirname) + except OSError as e: + if e.errno != errno.EEXIST: + raise + if model_filename is not None: model_basename = os.path.basename(model_filename) else: @@ -884,12 +889,13 @@ def _load_slice_up_vars(executor, dirname, slice_vars_and_attrs): load_prog = Program() load_block = load_prog.global_block() + need_delete_vars = [] for var_tuple in slice_vars_and_attrs: orig_var = var_tuple[0] start = var_tuple[1] slice_var = var_tuple[2] - end = start + reduce(lambda x, y: x * y, slice_var.shape) + end = start + slice_var.shape[0] clone_orig_var = load_block.create_var( name=orig_var.name, @@ -917,5 +923,8 @@ def _load_slice_up_vars(executor, dirname, slice_vars_and_attrs): attrs={'axes': [0], 'starts': [start], 'ends': [end]}) - + need_delete_vars.append(clone_orig_var) + load_block.append_op( + type='delete_var', + inputs={'X': need_delete_vars}, ) executor.run(load_prog) diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 459be4339b01228ba1b8f18e17e472ed72928511..9730fbf510cbe8c323b761b29821710f2c14a81d 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -1586,8 +1586,7 @@ class DynamicRNN(object): self.lod_rank_table = None self.max_seq_len = None self.step_idx = None - self.zero_idx 
= fill_constant( - shape=[1], value=0, dtype='int64', force_cpu=True) + self.zero_idx = None self.mem_dict = dict() self.output_array = [] self.outputs = [] @@ -1792,6 +1791,7 @@ class DynamicRNN(object): """ self._assert_in_rnn_block_('memory') + self._init_zero_idx_() if init is not None: if not isinstance(init, Variable): raise TypeError( @@ -1905,6 +1905,22 @@ class DynamicRNN(object): array_write(x=each, i=self.step_idx, array=outside_array) self.output_array.append(outside_array) + def _init_zero_idx_(self): + if self.zero_idx is None: + parent_block = self._parent_block_() + self.zero_idx = parent_block.create_var( + name=unique_name.generate('zero_idx'), dtype='int64') + parent_block.append_op( + type='fill_constant', + inputs={}, + outputs={'Out': [self.zero_idx]}, + attrs={ + 'shape': [1], + 'dtype': self.zero_idx.dtype, + 'value': float(0), + 'force_cpu': True + }) + def _parent_block_(self): prog = self.helper.main_program parent_idx = prog.current_block().parent_idx diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index e2d304dc86e731067e63cef4ee905368965365c6..d50f6744dfc3e25ec169e0fe5ee8e0b84a28e6ee 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -31,7 +31,8 @@ from ..unique_name import generate as unique_name __all__ = [ 'data', 'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer', - 'random_data_generator', 'py_reader', 'Preprocessor', 'load' + 'random_data_generator', 'py_reader', 'create_py_reader_by_data', + 'Preprocessor', 'load' ] @@ -61,7 +62,7 @@ def data(name, For example if shape=[1], the resulting shape is [-1, 1]. 2. If shape contains -1, such as shape=[1, -1], append_batch_size will be enforced to be False (ineffective). - dtype(int|float): The type of data : float32, float_16, int etc + dtype(basestring): The type of data: float32, float16, int, etc. type(VarType): The output type. By default it is LOD_TENSOR. lod_level(int): The LoD Level. 0 means the input data is not a sequence. stop_gradient(bool): A boolean that mentions whether gradient should flow. 
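As a quick illustration of the data() signature documented above (the names and shapes here are arbitrary), dtype is passed as a string:

.. code-block:: python

    import paddle.fluid as fluid

    # dtype is a string such as 'float32' or 'int64'; a -1 batch dimension is
    # prepended automatically unless append_batch_size=False is passed.
    image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64', lod_level=0)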
@@ -316,6 +317,7 @@ def _copy_reader_var_(block, var): new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER) new_var.desc.set_shapes(var.desc.shapes()) new_var.desc.set_dtypes(var.desc.dtypes()) + new_var.desc.set_lod_levels(var.desc.lod_levels()) new_var.persistable = True return new_var @@ -476,6 +478,159 @@ def random_data_generator(low, high, shapes, lod_levels, for_parallel=True): return monkey_patch_reader_methods(main_prog_var) +def _py_reader(capacity, + shapes, + dtypes, + lod_levels=None, + name=None, + use_double_buffer=True, + feed_list=None): + + if feed_list is not None: + if not isinstance(feed_list, list): + raise TypeError("feed_list should be a list of Variable" + " instead of " + str(type(feed_list))) + lod_levels = [] + dtypes = [] + shape_concat = [] + ranks = [] + shapes = [] + + for feed_data in feed_list: + dtypes.append(feed_data.dtype) + shape_concat.extend(feed_data.shape) + ranks.append(len(feed_data.shape)) + shapes.append(feed_data.shape) + lod_levels.append(feed_data.lod_level) + else: + dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] + shape_concat = [] + ranks = [] + + for shape in shapes: + shape_concat.extend(shape) + ranks.append(len(shape)) + + if lod_levels is None: + lod_levels = [0] * len(shapes) + + if name is None: + queue_name = unique_name('lod_tensor_blocking_queue') + reader_name = unique_name('create_py_reader') + double_buffer_name = unique_name('double_buffer') + else: + queue_name = "_".join([name, "queue"]) + reader_name = "_".join([name, "reader"]) + double_buffer_name = "_".join([name, "double_buffer"]) + + var = global_scope().var(queue_name) + feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, shapes) + + startup_blk = default_startup_program().current_block() + startup_var = startup_blk.create_var(name=reader_name) + startup_blk.append_op( + type='create_py_reader', + inputs={'blocking_queue': [queue_name]}, + outputs={'Out': [startup_var]}, + attrs={ + 'shape_concat': shape_concat, + 'lod_levels': lod_levels, + 'ranks': ranks + }) + + startup_var.desc.set_dtypes(dtypes) + startup_var.persistable = True + + main_prog_var = _copy_reader_var_(default_main_program().current_block(), + startup_var) + + reader = monkey_patch_reader_methods(main_prog_var) + if use_double_buffer: + double_buffer_reader = double_buffer(reader, name=double_buffer_name) + # we return a double buffer reader. However, the reset method comes from + # py_reader. 
+ double_buffer_reader.reset = reader.reset + reader = double_buffer_reader + + # monkey patch py_reader special methods + reader.queue = feed_queue + current_reset_method = reader.reset + reader.thread = None + reader.tensor_provider = None + reader.exited = False + + def start_provide_thread(func): + def __provider_thread__(): + for tensors in func(): + array = core.LoDTensorArray() + for item in tensors: + if not isinstance(item, core.LoDTensor): + tmp = core.LoDTensor() + tmp.set(item, core.CPUPlace()) + item = tmp + + array.append(item) + + if reader.exited: + break + feed_queue.push(array) + if reader.exited: + break + feed_queue.close() + + reader.thread = threading.Thread(target=__provider_thread__) + reader.thread.daemon = True + reader.thread.start() + + def __set_tensor_provider__(func): + reader.tensor_provider = func + + def __set_paddle_reader__(paddle_reader): + with program_guard(Program(), Program()): + actual_feed_list = feed_list + if actual_feed_list is None: + actual_feed_list = [] + counter = 0 + for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels): + name = str(counter) + actual_feed_list.append( + data( + name=name, + dtype=dtype, + shape=shape, + lod_level=lod_level)) + counter += 1 + + data_names = [feed_data.name for feed_data in actual_feed_list] + feeder = DataFeeder( + feed_list=actual_feed_list, place=core.CPUPlace()) + paddle_reader = feeder.decorate_reader( + paddle_reader, multi_devices=False) + + def __tensor_provider__(): + for slots in paddle_reader(): + yield [slots[data_name] for data_name in data_names] + + __set_tensor_provider__(__tensor_provider__) + + def __reset__(): + current_reset_method() + if reader.thread is not None and reader.tensor_provider is not None: + reader.exited = True + reader.thread.join() + reader.exited = False + + def __start__(): + start_provide_thread(reader.tensor_provider) + + reader.reset = __reset__ + reader.decorate_tensor_provider = __set_tensor_provider__ + reader.decorate_paddle_reader = __set_paddle_reader__ + reader.start = __start__ + + return reader + + def py_reader(capacity, shapes, dtypes, @@ -600,128 +755,72 @@ def py_reader(capacity, >>> except fluid.core.EOFException: >>> test_reader.reset() """ - dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] - shape_concat = [] - ranks = [] - - for shape in shapes: - shape_concat.extend(shape) - ranks.append(len(shape)) - - if lod_levels is None: - lod_levels = [0] * len(shapes) - - if name is None: - queue_name = unique_name('lod_tensor_blocking_queue') - reader_name = unique_name('create_py_reader') - double_buffer_name = unique_name('double_buffer') - else: - queue_name = "_".join([name, "queue"]) - reader_name = "_".join([name, "reader"]) - double_buffer_name = "_".join([name, "double_buffer"]) - - var = global_scope().var(queue_name) - feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, shapes) - - startup_blk = default_startup_program().current_block() - startup_var = startup_blk.create_var(name=reader_name) - startup_blk.append_op( - type='create_py_reader', - inputs={'blocking_queue': [queue_name]}, - outputs={'Out': [startup_var]}, - attrs={ - 'shape_concat': shape_concat, - 'lod_levels': lod_levels, - 'ranks': ranks - }) - - startup_var.desc.set_dtypes(dtypes) - startup_var.persistable = True - - main_prog_var = _copy_reader_var_(default_main_program().current_block(), - startup_var) - - reader = monkey_patch_reader_methods(main_prog_var) - if use_double_buffer: - double_buffer_reader = double_buffer(reader, 
name=double_buffer_name) - # we return a double buffer reader. However, the reset method comes from - # py_reader. - double_buffer_reader.reset = reader.reset - reader = double_buffer_reader - - # monkey patch py_reader special methods - reader.queue = feed_queue - current_reset_method = reader.reset - reader.thread = None - reader.tensor_provider = None - reader.exited = False - - def start_provide_thread(func): - def __provider_thread__(): - for tensors in func(): - array = core.LoDTensorArray() - for item in tensors: - if not isinstance(item, core.LoDTensor): - tmp = core.LoDTensor() - tmp.set(item, core.CPUPlace()) - item = tmp - - array.append(item) - - if reader.exited: - break - feed_queue.push(array) - if reader.exited: - break - feed_queue.close() + return _py_reader( + capacity=capacity, + shapes=shapes, + dtypes=dtypes, + lod_levels=lod_levels, + name=name, + use_double_buffer=use_double_buffer) - reader.thread = threading.Thread(target=__provider_thread__) - reader.thread.daemon = True - reader.thread.start() - def __set_tensor_provider__(func): - reader.tensor_provider = func +def create_py_reader_by_data(capacity, + feed_list, + name=None, + use_double_buffer=True): + """ + Create a Python reader for data feeding in Python - def __set_paddle_reader__(paddle_reader): - with program_guard(Program(), Program()): - feed_list = [] - counter = 0 - for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels): - name = str(counter) - feed_list.append( - data( - name=name, - dtype=dtype, - shape=shape, - lod_level=lod_level)) - counter += 1 - - feeder = DataFeeder(feed_list=feed_list, place=core.CPUPlace()) - paddle_reader = feeder.decorate_reader( - paddle_reader, multi_devices=False) + This layer returns a Reader Variable. - def __tensor_provider__(): - for slots in paddle_reader(): - yield [slots[str(idx)] for idx in six.moves.xrange(counter)] + Works much like py_reader except that it's input is feed_list + instead of shapes, dtypes and lod_levels - __set_tensor_provider__(__tensor_provider__) + Args: + capacity(int): The buffer capacity maintained by :code:`py_reader`. + feed_list(list(Variable)): The data feed list. + name(basestring): The prefix Python queue name and Reader name. None will + be generated automatically. + use_double_buffer(bool): Whether use double buffer or not. - def __reset__(): - current_reset_method() - if reader.thread is not None and reader.tensor_provider is not None: - reader.exited = True - reader.thread.join() - reader.exited = False + Returns: + Variable: A Reader from which we can get feeding data. - def __start__(): - start_provide_thread(reader.tensor_provider) + Examples: - reader.reset = __reset__ - reader.decorate_tensor_provider = __set_tensor_provider__ - reader.decorate_paddle_reader = __set_paddle_reader__ - reader.start = __start__ + 1. 
The basic usage of :code:`create_py_reader_by_data` is as follows: - - return reader + >>> import paddle.fluid as fluid + >>> import paddle.dataset.mnist as mnist + >>> + >>> image = fluid.layers.data(name='image', shape=[3,224,224], dtype='float32') + >>> label = fluid.layers.data(name='label', shape=[1], dtype='int64') + >>> reader = fluid.layers.create_py_reader_by_data(capacity=64, feed_list=[image, label]) + >>> reader.decorate_paddle_reader( + >>> paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=32), buf_size=1000)) + >>> + >>> img, label = fluid.layers.read_file(reader) + >>> loss = network(img, label) # some network definition + >>> + >>> fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) + >>> + >>> exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) + >>> for epoch_id in range(10): + >>> reader.start() + >>> try: + >>> while True: + >>> exe.run(fetch_list=[loss.name]) + >>> except fluid.core.EOFException: + >>> reader.reset() + """ + return _py_reader( + capacity=capacity, + shapes=None, + dtypes=None, + lod_levels=None, + name=name, + use_double_buffer=use_double_buffer, + feed_list=feed_list) def open_files(filenames, diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index d201357e6f8607f4d5f0405889958d5282b74c23..0b9f20987535e0405be50ea9a7d1d3afa7161bb6 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -152,9 +152,11 @@ __all__ = [ 'mul', 'sigmoid_cross_entropy_with_logits', 'maxout', + 'affine_grid', 'sequence_reverse', 'affine_channel', 'hash', + 'grid_sampler', 'log_loss', 'add_position_encoding', ] @@ -713,8 +715,18 @@ def dynamic_gru(input, The first part are weights of the update gate and reset gate with shape :math:`(D \\times 2D)`, and the second part are weights for candidate hidden state with shape :math:`(D \\times D)`. - bias_attr(ParamAttr): The parameter attribute for learnable the - hidden-hidden bias. + + If it is set to None or one attribute of ParamAttr, dynamic_gru will + create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|bool|None): The parameter attribute for the bias + of GRU. Note that the bias with :math:`(1 \\times 3D)` concatenates + the bias in the update gate, reset gate and candidate calculations. + If it is set to False, no bias will be applied to the update gate, + reset gate and candidate calculations. If it is set to None or one + attribute of ParamAttr, dynamic_gru will create ParamAttr as + bias_attr. If the Initializer of the bias_attr is not set, the bias + is initialized to zero. Default: None. is_reverse(bool): Whether to compute reversed GRU, default :attr:`False`. gate_activation(str): The activation for update gate and reset gate. @@ -813,10 +825,29 @@ def gru_unit(input, Args: input (Variable): The fc transformed input value of current step. - hidden (Variable): The hidden value of lstm unit from previous step. + hidden (Variable): The hidden value of gru unit from previous step. size (integer): The input dimension value. - param_attr (ParamAttr): The weight parameters for gru unit. Default: None - bias_attr (ParamAttr): The bias parameters for gru unit. Default: None + param_attr(ParamAttr|None): The parameter attribute for the learnable + hidden-hidden weight matrix. Note: + + - The shape of the weight matrix is :math:`(T \\times 3D)`, where + :math:`D` is the hidden size. + - All elements in the weight matrix can be divided into two parts. 
+ The first part are weights of the update gate and reset gate with + shape :math:`(D \\times 2D)`, and the second part are weights for + candidate hidden state with shape :math:`(D \\times D)`. + + If it is set to None or one attribute of ParamAttr, gru_unit will + create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|bool|None): The parameter attribute for the bias + of GRU. Note that the bias with :math:`(1 \\times 3D)` concatenates + the bias in the update gate, reset gate and candidate calculations. + If it is set to False, no bias will be applied to the update gate, + reset gate and candidate calculations. If it is set to None or one + attribute of ParamAttr, gru_unit will create ParamAttr as + bias_attr. If the Initializer of the bias_attr is not set, the bias + is initialized to zero. Default: None. activation (string): The activation type for cell (actNode). Default: 'tanh' gate_activation (string): The activation type for gates (actGate). @@ -2075,7 +2106,8 @@ def pool2d(input, global_pooling=False, use_cudnn=True, ceil_mode=False, - name=None): + name=None, + exclusive=True): """ ${comment} @@ -2089,11 +2121,13 @@ def pool2d(input, pool_type: ${pooling_type_comment} pool_stride (int): stride of the pooling layer. pool_padding (int): padding size. - global_pooling: ${global_pooling_comment} - use_cudnn: ${use_cudnn_comment} - ceil_mode: ${ceil_mode_comment} + global_pooling (bool): ${global_pooling_comment} + use_cudnn (bool): ${use_cudnn_comment} + ceil_mode (bool): ${ceil_mode_comment} name (str|None): A name for this layer(optional). If set None, the layer will be named automatically. + exclusive (bool): Whether to exclude padding points in average pooling + mode. Default: True Returns: Variable: The pooling result. @@ -2151,7 +2185,8 @@ def pool2d(input, "paddings": pool_padding, "use_cudnn": use_cudnn, "ceil_mode": ceil_mode, - "use_mkldnn": False + "use_mkldnn": False, + "exclusive": exclusive, }) return pool_out @@ -2165,7 +2200,8 @@ def pool3d(input, global_pooling=False, use_cudnn=True, ceil_mode=False, - name=None): + name=None, + exclusive=True): """ This function adds the operator for pooling in 3-dimensions, using the pooling configurations mentioned in input parameters. @@ -2181,6 +2217,8 @@ def pool3d(input, ceil_mode (bool): ${ceil_mode_comment} name (str): A name for this layer(optional). If set None, the layer will be named automatically. + exclusive (bool): Whether to exclude padding points in average pooling + mode. Default: True Returns: Variable: output of pool3d layer. @@ -2219,7 +2257,8 @@ def pool3d(input, "paddings": pool_padding, "use_cudnn": use_cudnn, "ceil_mode": ceil_mode, - "use_mkldnn": False + "use_mkldnn": False, + "exclusive": exclusive, }) return pool_out @@ -4447,7 +4486,10 @@ def transpose(x, perm, name=None): Examples: .. 
code-block:: python - x = fluid.layers.data(name='x', shape=[5, 10, 15], dtype='float32') + # use append_batch_size=False to avoid prepending extra + # batch size in shape + x = fluid.layers.data(name='x', shape=[5, 10, 15], + dtype='float32', append_batch_size=False) x_transposed = layers.transpose(x, perm=[1, 0, 2]) """ @@ -4684,7 +4726,8 @@ def multiplex(inputs, index): def softmax_with_cross_entropy(logits, label, soft_label=False, - ignore_index=-100): + ignore_index=-100, + numeric_stable_mode=False): """ **Softmax With Cross Entropy Operator.** @@ -4718,6 +4761,18 @@ def softmax_with_cross_entropy, \\left(\\text{logit}_i - \\log\\left(\\sum_{i=0}^{K} \\exp(\\text{logit}_i)\\right)\\right), j = 1,...,K + 3) If numeric_stable_mode is True, softmax is calculated first by: + + .. math:: + + max_j = \\max_{i=0}^{K}{\\text{logit}_i} + + log\\_max\\_sum_j = \\log\\sum_{i=0}^{K}\\exp(logit_i - max_j) + + softmax_j = \\exp(logit_j - max_j - {log\\_max\\_sum}_j) + + and then cross entropy loss is calculated by softmax and label. + Args: logits (Variable): The unscaled log probabilities, which is a 2-D tensor with shape [N x K]. N is the batch_size, and K is the class number. @@ -4729,6 +4784,13 @@ def softmax_with_cross_entropy, ignore_index (int): Specifies a target value that is ignored and does not contribute to the input gradient. Only valid if soft_label is set to False. Default: -100 + numeric_stable_mode (bool): A flag to indicate whether to use a more + numerically stable algorithm. Only valid + when soft_label is False and GPU is used. + When soft_label is True or CPU is used, + the algorithm is always numerically stable. + Note that the speed may be slower when using + the stable algorithm. Default: False Returns: Variable: The cross entropy loss is a 2-D tensor with shape [N x 1]. @@ -4751,8 +4813,11 @@ def softmax_with_cross_entropy, 'Label': label}, outputs={'Softmax': softmax, 'Loss': loss}, - attrs={'soft_label': soft_label, - 'ignore_index': ignore_index}) + attrs={ + 'soft_label': soft_label, + 'ignore_index': ignore_index, + 'numeric_stable_mode': numeric_stable_mode + }) return loss @@ -6113,6 +6178,124 @@ def crop(x, shape=None, offsets=None, name=None): return out +def affine_grid(theta, out_shape, name=None): + """ + It generates a grid of (x,y) coordinates using the parameters of + the affine transformation that correspond to a set of points where + the input feature map should be sampled to produce the transformed + output feature map. + + .. code-block:: text + + * Case 1: + + Given: + + theta = [[[x_11, x_12, x_13] + [x_14, x_15, x_16]] + [[x_21, x_22, x_23] + [x_24, x_25, x_26]]] + + out_shape = [2, 3, 5, 5] + + Step 1: + + Generate normalized coordinates according to out_shape. + The values of the normalized coordinates are in the interval between -1 and 1. + The shape of the normalized coordinates is [2, H, W] as below: + + C = [[[-1. -1. -1. -1. -1. ] + [-0.5 -0.5 -0.5 -0.5 -0.5] + [ 0. 0. 0. 0. 0. ] + [ 0.5 0.5 0.5 0.5 0.5] + [ 1. 1. 1. 1. 1. ]] + [[-1. -0.5 0. 0.5 1. ] + [-1. -0.5 0. 0.5 1. ] + [-1. -0.5 0. 0.5 1. ] + [-1. -0.5 0. 0.5 1. ] + [-1. -0.5 0. 0.5 1. ]]] + C[0] is the coordinates in height axis and C[1] is the coordinates in width axis. + + Step 2: + + Transpose and reshape C to the shape [H * W, 2] and append ones to the last dimension. Then we get: + C_ = [[-1. -1. 1. ] + [-0.5 -1. 1. ] + [ 0. -1. 1. ] + [ 0.5 -1. 1. ] + [ 1. -1. 1. ] + [-1. -0.5 1. ] + [-0.5 -0.5 1. ] + [ 0. -0.5 1. ] + [ 0.5 -0.5 1. ] + [ 1. -0.5 1. ] + [-1. 0. 1. 
] + [-0.5 0. 1. ] + [ 0. 0. 1. ] + [ 0.5 0. 1. ] + [ 1. 0. 1. ] + [-1. 0.5 1. ] + [-0.5 0.5 1. ] + [ 0. 0.5 1. ] + [ 0.5 0.5 1. ] + [ 1. 0.5 1. ] + [-1. 1. 1. ] + [-0.5 1. 1. ] + [ 0. 1. 1. ] + [ 0.5 1. 1. ] + [ 1. 1. 1. ]] + Step 3: + Compute the output by the equation $$Output[i] = C_ * Theta[i]^T$$ + + Args: + theta (Variable): A batch of affine transform parameters with shape [N, 2, 3]. + out_shape (Variable | list | tuple): The shape of target output with format [N, C, H, W]. + out_shape can be a Variable or a list or tuple. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: The output with shape [N, H, W, 2]. + + Raises: + ValueError: If the type of arguments is not supported. + + Examples: + + .. code-block:: python + theta = fluid.layers.data(name="x", shape=[2, 3], dtype="float32") + out_shape = fluid.layers.data(name="y", shape=[-1], dtype="int32") + data = fluid.layers.affine_grid(theta, out_shape) + + # or + data = fluid.layers.affine_grid(theta, [5, 3, 28, 28]) + + """ + helper = LayerHelper('affine_grid') + + if not (isinstance(out_shape, list) or isinstance(out_shape, tuple) or \ + isinstance(out_shape, Variable)): + raise ValueError("The out_shape should be a list, tuple or Variable.") + + if not isinstance(theta, Variable): + raise ValueError("The theta should be a Variable.") + + out = helper.create_variable_for_type_inference(theta.dtype) + ipts = {'Theta': theta} + attrs = {} + if isinstance(out_shape, Variable): + ipts['OutputShape'] = out_shape + else: + attrs['output_shape'] = out_shape + + helper.append_op( + type='affine_grid', + inputs=ipts, + outputs={'Output': out}, + attrs=None if len(attrs) == 0 else attrs) + return out + + def rank_loss(label, left, right, name=None): """ **Rank loss layer for RankNet** @@ -7327,10 +7510,10 @@ def clip(x, min, max, name=None): helper = LayerHelper("clip", **locals()) if name is None: - out = helper.create_variable_for_type_inference(dtype=x.dtype) - else: - out = helper.create_variable( - name=name, dtype=x.dtype, persistable=False) + name = unique_name.generate(".".join([helper.name, 'tmp'])) + + out = helper.create_variable( + type=x.type, name=name, dtype=x.dtype, persistable=False) helper.append_op( type="clip", @@ -7359,10 +7542,10 @@ def clip_by_norm(x, max_norm, name=None): helper = LayerHelper("clip_by_norm", **locals()) if name is None: - out = helper.create_variable_for_type_inference(dtype=x.dtype) - else: - out = helper.create_variable( - name=name, dtype=x.dtype, persistable=False) + name = unique_name.generate(".".join([helper.name, 'tmp'])) + + out = helper.create_variable( + type=x.type, name=name, dtype=x.dtype, persistable=False) helper.append_op( type="clip_by_norm", @@ -7566,19 +7749,59 @@ def affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None): def hash(input, hash_size, num_hash=1, name=None): """ - hash the input - Args: - input (Variable): The input variable which is a one-hot word. - hash_size (int): The space size for hash algorithm. + Hash the input to an integer whose value is less than the given hash size. + + The hash algorithm we use is xxHash - Extremely fast hash algorithm + (https://github.com/Cyan4973/xxHash/tree/v0.6.5) + + A simple example is shown below: + + .. 
code-block:: text + + Given: + + # shape [2, 2] + input.data = [ + [[1], [2]], + [[3], [4]], + ] + + input.lod = [[0, 2]] + + hash_size = 10000 + + num_hash = 4 + + Then: + + The hash op takes all the numbers in the input's 2nd dimension as the + hash algorithm's input each time. Each input is hashed 4 times, giving an + array of length 4. Each value in the array ranges from 0 to 9999. + + # shape [2, 4] + output.data = [ + [[9662], [9217], [1129], [8487]], + [[8310], [1327], [1654], [4567]], + ] + + output.lod = [[0, 2]] + + Args: + input (Variable): The input variable which is a one-hot word. The + dimensions of the input variable must be 2. + hash_size (int): The space size for hash algorithm. The output value + will be kept in the range :math:`[0, hash_size - 1]`. num_hash (int): The times of hash, default 1. name (str, default None): The name of this layer. - Returns: - Variable: The hash result variable which is a LoDTensor. - Examples: - .. code-block:: python - word_dict = paddle.dataset.imdb.word_dict() - x = fluid.layers.data(shape[1], dtype='int32', lod_level=1) - out = fluid.layers.hash(input=x, len(word_dict)) + + Returns: + Variable: The hash result variable which is a LoDTensor. + + Examples: + .. code-block:: python + word_dict = paddle.dataset.imdb.word_dict() + x = fluid.layers.data(name='x', shape=[1], dtype='int32', lod_level=1) + out = fluid.layers.hash(input=x, num_hash=4, hash_size=1000) """ helper = LayerHelper('hash', **locals()) out = helper.create_variable_for_type_inference( @@ -7592,6 +7815,87 @@ def hash(input, hash_size, num_hash=1, name=None): return out +@templatedoc() +def grid_sampler(x, grid, name=None): + """ + This operation samples input X by using bilinear interpolation based on + a flow field grid, which is usually generated by affine_grid. The grid of + shape [N, H, W, 2] is the concatenation of (grid_x, grid_y) coordinates + with shape [N, H, W] each, where grid_x indexes the 4th dimension + (the width dimension) of input data x and grid_y indexes the 3rd + dimension (the height dimension); the final result is the bilinear + interpolation over the 4 nearest corner points. + + Step 1: + Get the (x, y) grid coordinates and scale them to [0, H-1] / [0, W-1]. + + grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1) + grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1) + + Step 2: + Index input data X with the grid (x, y) in each [H, W] area, and bilinearly + interpolate the point value from the 4 nearest points. + + wn ------- y_n ------- en + | | | + | d_n | + | | | + x_w --d_w-- grid--d_e-- x_e + | | | + | d_s | + | | | + ws ------- y_s ------- es + + x_w = floor(x) // west side x coord + x_e = x_w + 1 // east side x coord + y_n = floor(y) // north side y coord + y_s = y_n + 1 // south side y coord + + d_w = grid_x - x_w // distance to west side + d_e = x_e - grid_x // distance to east side + d_n = grid_y - y_n // distance to north side + d_s = y_s - grid_y // distance to south side + + wn = X[:, :, y_n, x_w] // north-west point value + en = X[:, :, y_n, x_e] // north-east point value + ws = X[:, :, y_s, x_w] // south-west point value + es = X[:, :, y_s, x_e] // south-east point value + + output = wn * d_e * d_s + en * d_w * d_s + + ws * d_e * d_n + es * d_w * d_n + + Args: + x(Variable): Input data of shape [N, C, H, W]. + grid(Variable): Input grid tensor of shape [N, H, W, 2]. + name (str, default None): The name of this layer. + + Returns: + out(Variable): Output of shape [N, C, H, W], the result of sampling + input X with the grid using bilinear interpolation. + + Examples: + .. 
code-block:: python + + x = fluid.layers.data(name='x', shape=[3, 10, 32, 32], dtype='float32') + theta = fluid.layers.data(name='theta', shape=[3, 2, 3], dtype='float32') + grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32]) + out = fluid.layers.grid_sampler(x=x, grid=grid) + """ + helper = LayerHelper("grid_sampler", **locals()) + + if not isinstance(x, Variable): + raise ValueError("The x should be a Variable") + + if not isinstance(grid, Variable): + raise ValueError("The grid should be a Variable") + + out = helper.create_variable_for_type_inference(x.dtype) + ipts = {'X': x, 'Grid': grid} + + helper.append_op(type='grid_sampler', inputs=ipts, outputs={'Output': out}) + return out + + def log_loss(input, label, epsilon=1e-4, name=None): """ **Negative Log Loss Layer** diff --git a/python/paddle/fluid/recordio_writer.py b/python/paddle/fluid/recordio_writer.py index a69c0c29d4675d3e6b9b2a2d766b8be9935092cf..076a942cdde5623faa570bf98f889e8145b60f8b 100644 --- a/python/paddle/fluid/recordio_writer.py +++ b/python/paddle/fluid/recordio_writer.py @@ -41,9 +41,6 @@ def convert_reader_to_recordio_file( """ Convert a Python Reader to a recordio file. - Please see :ref:`api_guide_python_reader` and :ref:`api_guide_reader_op` for - details. - Examples: >>> import paddle.fluid as fluid diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index 2e87d8f4b4fa07773f205fd0a2151095a2353fc6..1513eca51439288acac35729300bcbe4e71e4205 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -86,6 +86,8 @@ if(WITH_DISTRIBUTE) # FIXME(typhoonzero): add this back #py_test_modules(test_dist_transformer MODULES test_dist_transformer) #set_tests_properties(test_dist_transformer PROPERTIES TIMEOUT 1000) + # TODO(typhoonzero): make dist tests parallel when the port management issue is fixed + set_tests_properties(test_dist_mnist test_dist_word2vec test_dist_se_resnext test_dist_ctr test_dist_simnet_bow test_dist_save_load test_dist_text_classification test_dist_mnist_batch_merge PROPERTIES RUN_SERIAL TRUE) endif(NOT APPLE) py_test_modules(test_dist_transpiler MODULES test_dist_transpiler) endif() diff --git a/python/paddle/fluid/tests/unittests/dist_save_load.py b/python/paddle/fluid/tests/unittests/dist_save_load.py new file mode 100644 index 0000000000000000000000000000000000000000..edc60550058f53da456c21de4b41142b907743df --- /dev/null +++ b/python/paddle/fluid/tests/unittests/dist_save_load.py @@ -0,0 +1,174 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
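Stepping out of the new test file for a moment: since the example in the grid_sampler docstring above is terse, here is a slightly fuller sketch of how the affine_grid and grid_sampler layers introduced in this patch compose. The layer names, shapes, and the fixed batch size of 3 are illustrative assumptions, not part of the patch.

.. code-block:: python

    import paddle.fluid as fluid

    # Feature map to warp; becomes [N, C, H, W] = [-1, 10, 32, 32] once the
    # batch dimension is prepended.
    x = fluid.layers.data(name='x', shape=[10, 32, 32], dtype='float32')
    # One 2x3 affine matrix per sample.
    theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
    # affine_grid turns theta into an [N, H, W, 2] sampling grid, and
    # grid_sampler bilinearly samples x at those normalized locations.
    grid = fluid.layers.affine_grid(theta, out_shape=[3, 10, 32, 32])
    out = fluid.layers.grid_sampler(x=x, grid=grid)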
+ +from __future__ import print_function + +import os +import sys +import signal +import subprocess +import argparse +import time +import math +import random +from multiprocessing import Process +from functools import reduce + +import numpy as np +import unittest +import six + +import paddle +import paddle.fluid as fluid +from paddle.fluid import core +from paddle.fluid import io + +from test_dist_base import TestDistRunnerBase, runtime_main, RUN_STEP +from dist_simnet_bow import TestDistSimnetBow2x2, DATA_URL, DATA_MD5 + + +class TestDistSaveLoad2x2(TestDistSimnetBow2x2): + def _load_persistable_vars(self, executor, dirname, program): + def _is_checkpoint_var(var): + """ + the checkpoint will not save or load all the variables. + var type is FEED_MINIBATCH/FETCH_LIST/RAW or var name ends with @GRAD are discarded. + + : param var(Variable) + """ + if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ + var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ + var.desc.type() == core.VarDesc.VarType.RAW: + return False + # @GRAD are named for gradient variables, checkpoint will not save it. + if "@GRAD" in var.name: + return False + # .trainer_ are named for distribute train variables, checkpoint will not save it. + if ".trainer_" in var.name: + return False + + # .block is named for distribute train variables, checkpoint will not save it. + if ".block" in var.name: + return False + + if "tmp_" in var.name: + return False + + return var.persistable + + io.load_vars( + executor, + dirname=dirname, + main_program=program, + predicate=_is_checkpoint_var, + filename=None) + + def run_pserver(self, args): + self.get_model(batch_size=2) + # NOTE: pserver should not call memory optimize + t = self.get_transpiler(args.trainer_id, + fluid.default_main_program(), args.endpoints, + args.trainers, args.sync_mode) + pserver_prog = t.get_pserver_program(args.current_endpoint) + startup_prog = t.get_startup_program(args.current_endpoint, + pserver_prog) + + need_load = bool(int(os.getenv("LOAD", "0"))) + model_dir = os.getenv("MODEL_DIR", "") + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + if need_load and model_dir: + self._load_persistable_vars(exe, model_dir, startup_prog) + exe.run(pserver_prog) + + def run_trainer(self, args): + test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \ + self.get_model(batch_size=2) + + if args.mem_opt: + fluid.memory_optimize(fluid.default_main_program(), skip_grads=True) + if args.is_dist: + t = self.get_transpiler(args.trainer_id, + fluid.default_main_program(), + args.endpoints, args.trainers, + args.sync_mode) + + trainer_prog = t.get_trainer_program() + else: + trainer_prog = fluid.default_main_program() + + if args.use_cuda: + place = fluid.CUDAPlace(0) + else: + place = fluid.CPUPlace() + + startup_exe = fluid.Executor(place) + startup_exe.run(fluid.default_startup_program()) + + strategy = fluid.ExecutionStrategy() + strategy.num_threads = 1 + strategy.allow_op_delay = False + + build_stra = fluid.BuildStrategy() + + if args.use_reduce: + build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + else: + build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + + exe = fluid.ParallelExecutor( + args.use_cuda, + loss_name=avg_cost.name, + exec_strategy=strategy, + build_strategy=build_stra) + + feed_var_list = [ + var for var in trainer_prog.global_block().vars.values() + if var.is_data + ] + + feeder = fluid.DataFeeder(feed_var_list, place) + reader_generator = 
train_reader() + + def get_data(): + origin_batch = next(reader_generator) + if args.is_dist and args.use_reader_alloc: + new_batch = [] + for offset, item in enumerate(origin_batch): + if offset % 2 == args.trainer_id: + new_batch.append(item) + return new_batch + else: + return origin_batch + + need_save = bool(int(os.getenv("SAVE", "0"))) + model_dir = os.getenv("MODEL_DIR", "") + + if need_save: + for _ in six.moves.xrange(RUN_STEP): + loss, = exe.run(fetch_list=[avg_cost.name], + feed=feeder.feed(get_data())) + if need_save and model_dir: + io.save_persistables(startup_exe, model_dir, trainer_prog) + + var = np.array(fluid.global_scope().find_var('__fc_b__').get_tensor()) + print(np.ravel(var).tolist()) + + +if __name__ == "__main__": + paddle.dataset.common.download(DATA_URL, 'simnet', DATA_MD5, "train") + runtime_main(TestDistSaveLoad2x2) diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index e97643cddef22465436051a41ef4b825e9634d23..690c4cf0ad6b2c741689e419223cfa6b6e1e5cf3 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -54,14 +54,6 @@ def get_numeric_gradient(place, def product(dim): return six.moves.reduce(lambda a, b: a * b, dim, 1) - def get_output(): - sum = [] - op.run(scope, place) - for output_name in output_names: - sum.append( - np.array(scope.find_var(output_name).get_tensor()).mean()) - return np.array(sum).sum() / len(output_names) - tensor_to_check = scope.find_var(input_to_check).get_tensor() tensor_size = product(tensor_to_check.shape()) tensor_to_check_dtype = tensor_to_check._dtype() @@ -77,6 +69,15 @@ def get_numeric_gradient(place, raise ValueError("Not supported data type " + str( tensor_to_check_dtype)) + def get_output(): + sum = [] + op.run(scope, place) + for output_name in output_names: + sum.append( + np.array(scope.find_var(output_name).get_tensor()).astype( + tensor_to_check_dtype).mean()) + return tensor_to_check_dtype(np.array(sum).sum() / len(output_names)) + gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype) def __get_elem__(tensor, i): diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py index ee291fe746f3a1b6ce18df9fb6aa174a89e2eadd..86f861674c26fe61e624103c2a0d70f816a1aebc 100644 --- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py +++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py @@ -18,6 +18,7 @@ import multiprocessing import os import unittest import paddle.fluid as fluid +import paddle.fluid.core as core import time import numpy as np import math @@ -40,7 +41,8 @@ class TestParallelExecutorBase(unittest.TestCase): use_reduce=False, fuse_elewise_add_act_ops=False, optimizer=fluid.optimizer.Adam, - use_fast_executor=False): + use_fast_executor=False, + enable_sequential_execution=False): def run_executor(exe, feed, fetch_list, program=None): if isinstance(exe, fluid.ParallelExecutor): res = exe.run(fetch_list=fetch_list, feed=feed) @@ -80,6 +82,9 @@ class TestParallelExecutorBase(unittest.TestCase): build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce \ if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops + build_strategy.enable_sequential_execution = enable_sequential_execution + if use_cuda and core.is_compiled_with_cuda(): + 
build_strategy.remove_unnecessary_lock = True if use_parallel_executor: exe = fluid.ParallelExecutor( diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index 30651c1326328180592520447e597aa722146a42..ad7591417ec116a2232bfb7cd94be37a32edfc2e 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -21,7 +21,7 @@ from op_test import OpTest from scipy.special import expit -class TestExp(OpTest): +class TestActivation(OpTest): def setUp(self): self.op_type = "exp" self.dtype = np.float32 @@ -42,24 +42,12 @@ class TestExp(OpTest): self.check_grad(['X'], 'Out', max_relative_error=0.007) def init_dtype(self): - pass - - -class TestFP16Exp(TestExp): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) + self.dtype = np.float32 -class TestSigmoid(OpTest): +class TestSigmoid(TestActivation): def setUp(self): self.op_type = "sigmoid" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) @@ -68,33 +56,15 @@ class TestSigmoid(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.01) - def init_dtype(self): - pass - - -class TestFP16Sigmoid(TestSigmoid): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestLogSigmoid(OpTest): +class TestLogSigmoid(TestActivation): def setUp(self): self.op_type = "logsigmoid" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) @@ -103,33 +73,15 @@ class TestLogSigmoid(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.008) - def init_dtype(self): - pass - - -class TestFP16LogSigmoid(TestLogSigmoid): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestTanh(OpTest): +class TestTanh(TestActivation): def setUp(self): self.op_type = "tanh" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) @@ -138,33 +90,15 @@ class TestTanh(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - - -class TestFP16Tanh(TestTanh): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class 
TestTanhShrink(OpTest): +class TestTanhShrink(TestActivation): def setUp(self): self.op_type = "tanh_shrink" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype) @@ -173,33 +107,15 @@ class TestTanhShrink(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.008) - def init_dtype(self): - pass - - -class TestFP16TanhShrink(TestTanhShrink): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestHardShrink(OpTest): +class TestHardShrink(TestActivation): def setUp(self): self.op_type = "hard_shrink" - self.dtype = np.float32 self.init_dtype() threshold = 0.5 @@ -211,33 +127,15 @@ class TestHardShrink(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.005) - def init_dtype(self): - pass - - -class TestFP16HardShrink(TestHardShrink): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestSoftShrink(OpTest): +class TestSoftShrink(TestActivation): def setUp(self): self.op_type = "softshrink" - self.dtype = np.float32 self.init_dtype() lambda_val = 0.1 @@ -250,33 +148,15 @@ class TestSoftShrink(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - - -class TestFP16SoftShrink(TestSoftShrink): - def init_dtype(self): - self.dtype = np.float16 - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestSqrt(OpTest): +class TestSqrt(TestActivation): def setUp(self): self.op_type = "sqrt" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) @@ -285,33 +165,15 @@ class TestSqrt(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - -class TestFP16Sqrt(TestSqrt): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestAbs(OpTest): +class TestAbs(TestActivation): def setUp(self): self.op_type = "abs" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) @@ -325,33 +187,15 @@ class TestAbs(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': 
out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - -class TestFP16Abs(TestAbs): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestCeil(OpTest): +class TestCeil(TestActivation): def setUp(self): self.op_type = "ceil" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) @@ -360,30 +204,14 @@ class TestCeil(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - # The same reason with TestFloor - - def init_dtype(self): + def test_check_grad(self): pass -class TestFP16Ceil(TestCeil): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestFloor(OpTest): +class TestFloor(TestActivation): def setUp(self): self.op_type = "floor" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) @@ -392,31 +220,16 @@ class TestFloor(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - # the gradient on floor, ceil, round is undefined. # we return zero as gradient, but the numpy return nan - - def init_dtype(self): + # The same reason with TestFloor + def test_check_grad(self): pass -class TestFP16Floor(TestFloor): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestCos(OpTest): +class TestCos(TestActivation): def setUp(self): self.op_type = "cos" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) @@ -425,33 +238,15 @@ class TestCos(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - - -class TestFP16Cos(TestCos): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestSin(OpTest): +class TestSin(TestActivation): def setUp(self): self.op_type = "sin" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) @@ -460,33 +255,15 @@ class TestSin(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - - -class TestFP16Sin(TestSin): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - 
place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestRound(OpTest): +class TestRound(TestActivation): def setUp(self): self.op_type = "round" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) @@ -495,28 +272,13 @@ class TestRound(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - - def init_dtype(self): + def test_check_grad(self): pass -class TestFP16Round(TestRound): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestRelu(OpTest): +class TestRelu(TestActivation): def setUp(self): self.op_type = "relu" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) @@ -527,33 +289,15 @@ class TestRelu(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - - -class TestFP16Relu(TestRelu): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestBRelu(OpTest): +class TestBRelu(TestActivation): def setUp(self): self.op_type = "brelu" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype) @@ -570,33 +314,15 @@ class TestBRelu(OpTest): self.attrs = {'t_min': t_min, 't_max': t_max} self.outputs = {'Out': t} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) - def init_dtype(self): - pass - -class TestFP16BRelu(TestBRelu): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestRelu6(OpTest): +class TestRelu6(TestActivation): def setUp(self): self.op_type = "relu6" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype) @@ -610,33 +336,15 @@ class TestRelu6(OpTest): self.attrs = {'threshold': threshold} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) - def init_dtype(self): - pass - -class TestFP16Relu6(TestRelu6): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestSoftRelu(OpTest): +class TestSoftRelu(TestActivation): def setUp(self): self.op_type = "soft_relu" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype) @@ -653,33 +361,15 @@ class TestSoftRelu(OpTest): self.attrs = {'threshold': threshold} self.outputs = {'Out': out} - def 
test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) - def init_dtype(self): - pass - - -class TestFP16SoftRelu(TestSoftRelu): - def init_dtype(self): - self.dtype = np.float16 - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestELU(OpTest): +class TestELU(TestActivation): def setUp(self): self.op_type = "elu" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype) @@ -691,33 +381,15 @@ class TestELU(OpTest): self.attrs = {'alpha': alpha} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) - def init_dtype(self): - pass - - -class TestFP16ELU(TestELU): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestReciprocal(OpTest): +class TestReciprocal(TestActivation): def setUp(self): self.op_type = "reciprocal" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) @@ -726,33 +398,15 @@ class TestReciprocal(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.01) - def init_dtype(self): - pass - - -class TestFP16Reciprocal(TestReciprocal): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestLog(OpTest): +class TestLog(TestActivation): def setUp(self): self.op_type = "log" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) @@ -761,33 +415,15 @@ class TestLog(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - - -class TestFP16Log(TestLog): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestSquare(OpTest): +class TestSquare(TestActivation): def setUp(self): self.op_type = "square" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) @@ -796,33 +432,15 @@ class TestSquare(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - -class TestFP16Square(TestSquare): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if 
core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestPow(OpTest): +class TestPow(TestActivation): def setUp(self): self.op_type = "pow" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype) @@ -832,33 +450,15 @@ class TestPow(OpTest): self.attrs = {'factor': 3.0} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.02) - def init_dtype(self): - pass - - -class TestFP16Pow(TestPow): - def init_dtype(self): - self.dtype = np.float16 - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=5e-2) - - -class TestSTanh(OpTest): +class TestSTanh(TestActivation): def setUp(self): self.op_type = "stanh" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) @@ -870,34 +470,17 @@ class TestSTanh(OpTest): self.attrs = {'scale_a': scale_a, 'scale_b': scale_b} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - -class TestFP16STanh(TestSTanh): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestSoftplus(OpTest): +class TestSoftplus(TestActivation): def setUp(self): self.op_type = "softplus" - self.dtype = np.float64 self.init_dtype() + self.dtype = np.float64 x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = np.log(1 + np.exp(x)) @@ -905,33 +488,15 @@ class TestSoftplus(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - - -class TestFP16Softplus(TestSoftplus): - def init_dtype(self): - self.dtype = np.float16 - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestSoftsign(OpTest): +class TestSoftsign(TestActivation): def setUp(self): self.op_type = "softsign" - self.dtype = np.float32 self.init_dtype() x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) @@ -940,33 +505,15 @@ class TestSoftsign(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.007) - def init_dtype(self): - pass - - -class TestFP16Softsign(TestSoftsign): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestThresholdedRelu(OpTest): +class TestThresholdedRelu(TestActivation): def setUp(self): self.op_type = 
"thresholded_relu" - self.dtype = np.float32 self.init_dtype() threshold = 0.25 @@ -981,33 +528,15 @@ class TestThresholdedRelu(OpTest): self.attrs = {'threshold': threshold} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=self.relative_error) - def init_dtype(self): - pass - - -class TestFP16ThresholdedRelu(TestThresholdedRelu): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestHardSigmoid(OpTest): +class TestHardSigmoid(TestActivation): def setUp(self): self.op_type = "hard_sigmoid" - self.dtype = np.float32 self.init_dtype() self.relative_error = 0.002 @@ -1030,33 +559,15 @@ class TestHardSigmoid(OpTest): self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.002) - def init_dtype(self): - pass - - -class TestFP16HardSigmoid(TestHardSigmoid): - def init_dtype(self): - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - -class TestSwish(OpTest): +class TestSwish(TestActivation): def setUp(self): self.op_type = "swish" - self.dtype = np.float32 self.init_dtype() X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype) @@ -1067,28 +578,70 @@ class TestSwish(OpTest): self.attrs = {'beta': beta} self.outputs = {'Out': out} - def test_check_output(self): - self.check_output() - def test_check_grad(self): if self.dtype == np.float16: return self.check_grad(['X'], 'Out', max_relative_error=0.008) - def init_dtype(self): - pass - -class TestFP16Swish(TestSwish): - def init_dtype(self): - self.dtype = np.float16 +#------------------ Test Fp16 ---------------------- +def create_test_act_fp16_class(parent, + atol=1e-3, + grad_check=True, + grad_atol=0.80): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") + class TestActFp16(parent): + def init_dtype(self): + self.dtype = np.float16 - def test_check_output(self): - if core.is_compiled_with_cuda(): + def test_check_output(self): place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) + support_fp16 = core.is_float16_supported(place) + if support_fp16: + self.check_output_with_place(place, atol=atol) + def test_check_grad(self): + place = core.CUDAPlace(0) + support_fp16 = core.is_float16_supported(place) + if support_fp16 and grad_check: + self.check_grad_with_place( + place, ['X'], 'Out', max_relative_error=grad_atol) + + cls_name = "{0}_{1}".format(parent.__name__, "fp16") + TestActFp16.__name__ = cls_name + globals()[cls_name] = TestActFp16 + + +create_test_act_fp16_class(TestActivation) +create_test_act_fp16_class(TestSigmoid) +create_test_act_fp16_class(TestLogSigmoid) +create_test_act_fp16_class(TestTanh) +create_test_act_fp16_class(TestTanhShrink) +create_test_act_fp16_class(TestHardShrink) +create_test_act_fp16_class(TestSoftShrink) +create_test_act_fp16_class(TestSqrt) +create_test_act_fp16_class(TestAbs) +create_test_act_fp16_class(TestCeil, 
grad_check=False) +create_test_act_fp16_class(TestFloor, grad_check=False) +create_test_act_fp16_class(TestCos, grad_atol=0.85) +create_test_act_fp16_class(TestSin) +create_test_act_fp16_class(TestRound, grad_check=False) +create_test_act_fp16_class(TestRelu) +create_test_act_fp16_class(TestBRelu) +create_test_act_fp16_class(TestRelu6) +create_test_act_fp16_class(TestSoftRelu) +create_test_act_fp16_class(TestELU) +create_test_act_fp16_class(TestReciprocal) +create_test_act_fp16_class(TestLog) +create_test_act_fp16_class(TestSquare) +create_test_act_fp16_class(TestPow, atol=5e-2) +create_test_act_fp16_class(TestSTanh, grad_atol=0.9) +create_test_act_fp16_class(TestSoftplus) +create_test_act_fp16_class(TestSoftsign) +create_test_act_fp16_class(TestThresholdedRelu) +create_test_act_fp16_class(TestHardSigmoid) +create_test_act_fp16_class(TestSwish) if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_affine_grid_op.py b/python/paddle/fluid/tests/unittests/test_affine_grid_op.py new file mode 100644 index 0000000000000000000000000000000000000000..576d00940c4c7a5e30af5550e14b674a73e7df11 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_affine_grid_op.py @@ -0,0 +1,79 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import numpy as np +from op_test import OpTest + + +def AffineGrid(theta, size): + n = size[0] + w = size[3] + h = size[2] + h_idx = np.repeat( + np.linspace(-1, 1, h)[np.newaxis, :], w, axis=0).T[:, :, np.newaxis] + w_idx = np.repeat( + np.linspace(-1, 1, w)[np.newaxis, :], h, axis=0)[:, :, np.newaxis] + grid = np.concatenate( + [w_idx, h_idx, np.ones([h, w, 1])], axis=2) # h * w * 3 + grid = np.repeat(grid[np.newaxis, :], size[0], axis=0) # n * h * w *3 + + ret = np.zeros([n, h * w, 2]) + theta = theta.transpose([0, 2, 1]) + for i in range(len(theta)): + ret[i] = np.dot(grid[i].reshape([h * w, 3]), theta[i]) + +# print ret.reshape([h * w, 2]).astype("float32") + return ret.reshape([n, h, w, 2]).astype("float32") + + +class TestAffineGridOp(OpTest): + def setUp(self): + self.initTestCase() + self.op_type = "affine_grid" + theta = np.random.randint(1, 3, self.theta_shape).astype("float32") + theta = np.ones(self.theta_shape).astype("float32") + self.inputs = {'Theta': theta} + self.attrs = {"use_cudnn": True} + if self.dynamic_shape: + self.inputs['OutputShape'] = self.output_shape + else: + self.attrs['output_shape'] = self.output_shape + self.outputs = {'Output': AffineGrid(theta, self.output_shape)} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad( + ['Theta'], + 'Output', + no_grad_set=['OutputShape'], + max_relative_error=0.006) + + def initTestCase(self): + self.theta_shape = (3, 2, 3) + self.output_shape = np.array([3, 2, 5, 7]).astype("int32") + self.dynamic_shape = False + + +class TestAffineGridOpCase1(TestAffineGridOp): + def initTestCase(self): + self.theta_shape = (3, 2, 3) + self.output_shape = np.array([3, 2, 5, 7]).astype("int32") + self.dynamic_shape = True + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index 2ecc2504a8c9c5ecfc32cee96df9e368ff219cbb..aba3e7139c25fe66f814e41080afb8f1dad79e4b 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -223,106 +223,81 @@ class TestWithInput1x1Filter1x1(TestConv2dOp): #----------------Conv2dCUDNN---------------- -class TestCUDNN(TestConv2dOp): - def init_kernel_type(self): - self.use_cudnn = True - -class TestFP16CUDNN(TestConv2dOp): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=2e-2) +def create_test_cudnn_class(parent, cls_name): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") + class TestCUDNNCase(parent): + def init_kernel_type(self): + self.use_cudnn = True + cls_name = "{0}".format(cls_name) + TestCUDNNCase.__name__ = cls_name + globals()[cls_name] = TestCUDNNCase -class TestCUDNNWithPad(TestWithPad): - def init_kernel_type(self): - self.use_cudnn = True - - -class TestFP16CUDNNWithPad(TestWithPad): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=2e-2) - - -class TestCUDNNWithStride(TestWithStride): - def init_kernel_type(self): - self.use_cudnn = True - - -class 
TestFP16CUDNNWithStride(TestWithStride): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=2e-2) +create_test_cudnn_class(TestConv2dOp, "TestPool2DCUDNNOp") +create_test_cudnn_class(TestWithPad, "TestPool2DCUDNNOpCase1") +create_test_cudnn_class(TestWithStride, "TestPool2DCUDNNOpCase2") +create_test_cudnn_class(TestWithGroup, "TestPool2DCUDNNOpCase3") +create_test_cudnn_class(TestWith1x1, "TestPool2DCUDNNOpCase4") +create_test_cudnn_class(TestWithInput1x1Filter1x1, "TestPool2DCUDNNOpCase4") -class TestCUDNNWithGroup(TestWithGroup): - def init_kernel_type(self): - self.use_cudnn = True - - -class TestFP16CUDNNWithGroup(TestWithGroup): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=2e-2) - +#----------------Conv2dCUDNN---------------- -class TestCUDNNWith1x1(TestWith1x1): - def init_kernel_type(self): - self.use_cudnn = True +def create_test_cudnn_fp16_class(parent, cls_name, grad_check=True): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") + class TestConv2DCUDNNFp16(parent): + def init_kernel_type(self): + self.use_cudnn = True + self.dtype = np.float16 -class TestFP16CUDNNWith1x1(TestWith1x1): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-2) - def test_check_output(self): - if core.is_compiled_with_cuda(): + def test_check_grad_no_filter(self): place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=2e-2) - - -class TestCUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1): - def init_kernel_type(self): - self.use_cudnn = True - - -class TestFP16CUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): + if core.is_float16_supported(place) and grad_check: + self.check_grad_with_place( + place, ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter'])) + + def test_check_grad_no_input(self): place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=2e-2) + if core.is_float16_supported(place) and grad_check: + self.check_grad_with_place( + place, ['Filter'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Input'])) + + cls_name = "{0}".format(cls_name) + TestConv2DCUDNNFp16.__name__ = cls_name + globals()[cls_name] = TestConv2DCUDNNFp16 + + +create_test_cudnn_fp16_class( + TestConv2dOp, "TestPool2DCUDNNFp16Op", grad_check=False) +create_test_cudnn_fp16_class( + TestWithPad, "TestPool2DCUDNNFp16OpCase1", grad_check=False) +create_test_cudnn_fp16_class( + TestWithStride, "TestPool2DCUDNNFp16OpCase2", grad_check=False) +create_test_cudnn_fp16_class( + TestWithGroup, "TestPool2DCUDNNFp16OpCase3", grad_check=False) +create_test_cudnn_fp16_class( + TestWith1x1, "TestPool2DCUDNNFp16OpCase4", grad_check=False) +create_test_cudnn_fp16_class( + 
TestWithInput1x1Filter1x1, "TestPool2DCUDNNFp16OpCase4", grad_check=False) + +# -------TestDepthwiseConv class TestDepthwiseConv(TestConv2dOp): diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py index f22badbea0c67b210f7ac4e14e5d647f1cffa6cc..4bdc6403cb4fde2b1f4efd957e922b7ea5cd8f38 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py @@ -16,28 +16,58 @@ from __future__ import print_function import unittest import numpy as np +import paddle.fluid.core as core from op_test import OpTest, randomize_probability -class TestCrossEntropyOp1(OpTest): +class TestCrossEntropyOp(OpTest): """Test cross-entropy with discrete one-hot labels. """ def setUp(self): self.op_type = "cross_entropy" - batch_size = 30 - class_num = 10 + self.soft_label = False + self.ignore_index = -100 + self.dtype = np.float64 + self.batch_size = 30 + self.class_num = 10 + + self.init_dtype_type() + self.init_attr_type() + self.init_bs_class_num() + self.init_x() + self.init_label() + self.get_cross_entropy() + + self.inputs = {"X": self.x, "Label": self.label} + self.outputs = {"Y": self.cross_entropy} + self.attrs = { + "soft_label": self.soft_label, + "ignore_index": self.ignore_index + } + + def init_x(self): + self.x = randomize_probability( + self.batch_size, self.class_num, dtype=self.dtype) + + def init_label(self): + self.label = np.random.randint( + 0, self.class_num, (self.batch_size, 1), dtype="int64") + + def get_cross_entropy(self): + self.cross_entropy = np.asmatrix( + [[-np.log(self.x[i][self.label[i][0]])] + for i in range(self.x.shape[0])], + dtype="float64") - X = randomize_probability(batch_size, class_num, dtype='float64') + def init_attr_type(self): + pass - label = np.random.randint(0, class_num, (batch_size, 1), dtype="int64") - cross_entropy = np.asmatrix( - [[-np.log(X[i][label[i][0]])] for i in range(X.shape[0])], - dtype="float64") + def init_dtype_type(self): + pass - self.inputs = {"X": X, "Label": label} - self.outputs = {"Y": cross_entropy} - self.attrs = {"soft_label": False} + def init_bs_class_num(self): + pass def test_check_output(self): self.check_output() @@ -46,197 +76,231 @@ class TestCrossEntropyOp1(OpTest): self.check_grad(["X"], "Y", numeric_grad_delta=0.001) -class TestCrossEntropyOp2(OpTest): +class TestCrossEntropyOp2(TestCrossEntropyOp): """Test cross-entropy with vectorized soft labels. 
""" - def setUp(self): - self.op_type = "cross_entropy" - batch_size = 5 - class_num = 37 + def init_label(self): + self.label = np.random.uniform( + 0.1, 1.0, [self.batch_size, self.class_num]).astype(self.dtype) + self.label /= self.label.sum(axis=1, keepdims=True) - X = randomize_probability(batch_size, class_num) - label = np.random.uniform(0.1, 1.0, - [batch_size, class_num]).astype("float32") - label /= label.sum(axis=1, keepdims=True) - cross_entropy = (-label * np.log(X)).sum( - axis=1, keepdims=True).astype("float32") + def get_cross_entropy(self): + self.cross_entropy = (-self.label * np.log(self.x)).sum( + axis=1, keepdims=True).astype(self.dtype) - self.inputs = {"X": X, "Label": label} - self.outputs = {"Y": cross_entropy} - self.attrs = {"soft_label": True} + def init_attr_type(self): + self.soft_label = True - def test_check_output(self): - self.check_output() + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.batch_size = 5 + self.class_num = 37 def test_check_grad(self): self.check_grad( ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) -class TestCrossEntropyOp3(OpTest): +class TestCrossEntropyOp3(TestCrossEntropyOp): """Test cross-entropy with vectorized one-hot representation of labels. """ - def setUp(self): - self.op_type = "cross_entropy" - batch_size = 5 - class_num = 17 + def init_label(self): + self.label_index = np.random.randint(0, self.class_num, + (self.batch_size)) + self.label = np.zeros(self.x.shape).astype(self.dtype) + self.label[np.arange(self.batch_size), self.label_index] = 1 - X = randomize_probability(batch_size, class_num) - label_index = np.random.randint( - 0, class_num, (batch_size), dtype="int32") - label = np.zeros(X.shape) - label[np.arange(batch_size), label_index] = 1 + def get_cross_entropy(self): + self.cross_entropy = np.asmatrix( + [[-np.log(self.x[i][self.label_index[i]])] + for i in range(self.x.shape[0])]).astype(self.dtype) - cross_entropy = np.asmatrix( - [[-np.log(X[i][label_index[i]])] for i in range(X.shape[0])], - dtype="float32") - cross_entropy2 = (-label * np.log(X)).sum( - axis=1, keepdims=True).astype("float32") + def init_attr_type(self): + self.soft_label = True - self.inputs = {"X": X, "Label": label.astype(np.float32)} - self.outputs = {"Y": cross_entropy} - self.attrs = {"soft_label": True} + def init_dtype_type(self): + self.dtype = np.float32 - def test_check_output(self): - self.check_output() + def init_bs_class_num(self): + self.batch_size = 5 + self.class_num = 17 def test_check_grad(self): self.check_grad( ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) -class TestCrossEntropyOp4(OpTest): +class TestCrossEntropyOp4(TestCrossEntropyOp): """Test high rank tensor cross-entropy with discrete one-hot labels. 
""" - def setUp(self): - self.op_type = "cross_entropy" - shape = [10, 2, 4] - ins_num = np.prod(np.array(shape)) - class_num = 10 + def init_x(self): + self.shape = [10, 2, 4] + self.ins_num = np.prod(np.array(self.shape)) + self.X_2d = randomize_probability(self.ins_num, + self.class_num).astype(self.dtype) + self.x = self.X_2d.reshape(self.shape + [self.class_num]) - X_2d = randomize_probability(ins_num, class_num, dtype='float64') + def init_label(self): + self.label_2d = np.random.randint( + 0, self.class_num, (self.ins_num, 1), dtype="int64") + self.label = self.label_2d.reshape(self.shape + [1]) - label_2d = np.random.randint(0, class_num, (ins_num, 1), dtype="int64") + def get_cross_entropy(self): cross_entropy_2d = np.asmatrix( - [[-np.log(X_2d[i][label_2d[i][0]])] for i in range(X_2d.shape[0])], - dtype="float64") + [[-np.log(self.X_2d[i][self.label_2d[i][0]])] + for i in range(self.X_2d.shape[0])]).astype(self.dtype) + self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + + [1]) - X = X_2d.reshape(shape + [class_num]) - label = label_2d.reshape(shape + [1]) - cross_entropy = np.array(cross_entropy_2d).reshape(shape + [1]) + def init_attr_type(self): + self.soft_label = False - self.inputs = {"X": X, "Label": label} - self.outputs = {"Y": cross_entropy} - self.attrs = {"soft_label": False} - - def test_check_output(self): - self.check_output() + def init_dtype_type(self): + self.dtype = np.float64 - def test_check_grad(self): - self.check_grad(["X"], "Y", numeric_grad_delta=0.001) + def init_bs_class_num(self): + self.class_num = 10 -class TestCrossEntropyOp5(OpTest): +class TestCrossEntropyOp5(TestCrossEntropyOp): """Test high rank tensor cross-entropy with vectorized soft labels. """ - def setUp(self): - self.op_type = "cross_entropy" - shape = [4, 3] - ins_num = np.prod(np.array(shape)) - class_num = 37 + def init_x(self): + self.shape = [4, 3] + self.ins_num = np.prod(np.array(self.shape)) + self.X_2d = randomize_probability(self.ins_num, + self.class_num).astype(self.dtype) + self.x = self.X_2d.reshape(self.shape + [self.class_num]) - X_2d = randomize_probability(ins_num, class_num) - label_2d = np.random.uniform(0.1, 1.0, - [ins_num, class_num]).astype("float32") - label_2d /= label_2d.sum(axis=1, keepdims=True) - cross_entropy_2d = (-label_2d * np.log(X_2d)).sum( - axis=1, keepdims=True).astype("float32") + def init_label(self): + self.label_2d = np.random.uniform( + 0.1, 1.0, [self.ins_num, self.class_num]).astype(self.dtype) + self.label_2d /= self.label_2d.sum(axis=1, keepdims=True) + self.label = self.label_2d.reshape(self.shape + [self.class_num]) - X = X_2d.reshape(shape + [class_num]) - label = label_2d.reshape(shape + [class_num]) - cross_entropy = np.array(cross_entropy_2d).reshape(shape + [1]) + def get_cross_entropy(self): + cross_entropy_2d = (-self.label_2d * np.log(self.X_2d)).sum( + axis=1, keepdims=True).astype(self.dtype) + self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + + [1]) - self.inputs = {"X": X, "Label": label} - self.outputs = {"Y": cross_entropy} - self.attrs = {"soft_label": True} + def init_attr_type(self): + self.soft_label = True - def test_check_output(self): - self.check_output() + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.class_num = 37 def test_check_grad(self): self.check_grad( ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) -class TestCrossEntropyOp6(OpTest): +class TestCrossEntropyOp6(TestCrossEntropyOp): """Test high rank tensor 
cross-entropy with vectorized one-hot representation of labels. """ - def setUp(self): - self.op_type = "cross_entropy" - shape = [4, 3, 2] - ins_num = np.prod(np.array(shape)) - class_num = 17 - - X_2d = randomize_probability(ins_num, class_num) - label_index_2d = np.random.randint( - 0, class_num, (ins_num), dtype="int32") - label_2d = np.zeros(X_2d.shape) - label_2d[np.arange(ins_num), label_index_2d] = 1 - + def init_x(self): + self.shape = [4, 3, 2] + self.ins_num = np.prod(np.array(self.shape)) + self.X_2d = randomize_probability(self.ins_num, + self.class_num).astype(self.dtype) + self.x = self.X_2d.reshape(self.shape + [self.class_num]) + + def init_label(self): + self.label_index_2d = np.random.randint( + 0, self.class_num, (self.ins_num), dtype="int64") + label_2d = np.zeros(self.X_2d.shape) + label_2d[np.arange(self.ins_num), self.label_index_2d] = 1 + self.label = label_2d.reshape(self.shape + [self.class_num]).astype( + self.dtype) + + def get_cross_entropy(self): cross_entropy_2d = np.asmatrix( - [[-np.log(X_2d[i][label_index_2d[i]])] - for i in range(X_2d.shape[0])], - dtype="float32") + [[-np.log(self.X_2d[i][self.label_index_2d[i]])] + for i in range(self.X_2d.shape[0])]) + self.cross_entropy = np.array(cross_entropy_2d).reshape( + self.shape + [1]).astype(self.dtype) - X = X_2d.reshape(shape + [class_num]) - label = label_2d.reshape(shape + [class_num]) - cross_entropy = np.array(cross_entropy_2d).reshape(shape + [1]) + def init_attr_type(self): + self.soft_label = True - self.inputs = {"X": X, "Label": label.astype(np.float32)} - self.outputs = {"Y": cross_entropy} - self.attrs = {"soft_label": True} + def init_dtype_type(self): + self.dtype = np.float32 - def test_check_output(self): - self.check_output() + def init_bs_class_num(self): + self.class_num = 17 def test_check_grad(self): self.check_grad( ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) -class TestCrossEntropyOp7(OpTest): +class TestCrossEntropyOp7(TestCrossEntropyOp): """Test cross-entropy with ignore index. 
""" - def setUp(self): - self.op_type = "cross_entropy" - batch_size = 30 - class_num = 10 - ignore_index = 3 - - X = randomize_probability(batch_size, class_num, dtype='float64') - - label = np.random.randint(0, class_num, (batch_size, 1), dtype="int64") - cross_entropy = np.asmatrix( - [[-np.log(X[i][label[i][0]])] - if label[i][0] != ignore_index else [0] - for i in range(X.shape[0])], - dtype="float64") - self.inputs = {"X": X, "Label": label} - self.outputs = {"Y": cross_entropy} - self.attrs = {"soft_label": False, "ignore_index": ignore_index} - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(["X"], "Y", numeric_grad_delta=0.001) - + def init_label(self): + self.label = np.random.randint( + 0, self.class_num, (self.batch_size, 1), dtype="int64") + + def get_cross_entropy(self): + self.cross_entropy = np.asmatrix( + [[-np.log(self.x[i][self.label[i][0]])] + if self.label[i][0] != self.ignore_index else [0] + for i in range(self.x.shape[0])]).astype(self.dtype) + + def init_attr_type(self): + self.soft_label = False + self.ignore_index = 3 + + def init_dtype_type(self): + self.dtype = np.float64 + + def init_bs_class_num(self): + self.batch_size = 30 + self.class_num = 10 + + +# Add Fp16 test +def create_test_class(parent, cls_name): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") + class TestCrossEntropyFP16Op(parent): + def init_dtype_type(self): + return np.float16 + + def test_check_output(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-1) + + def test_check_grad(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['X'], 'Y', max_relative_error=0.9) + + cls_name = "{0}".format(cls_name) + TestCrossEntropyFP16Op.__name__ = cls_name + globals()[cls_name] = TestCrossEntropyFP16Op + + +create_test_class(TestCrossEntropyOp, "TestCrossEntropyF16Op") +#create_test_class(TestCrossEntropyOp2, "TestCrossEntropyF16Op2") +create_test_class(TestCrossEntropyOp3, "TestCrossEntropyF16Op3") +create_test_class(TestCrossEntropyOp4, "TestCrossEntropyF16Op4") +#create_test_class(TestCrossEntropyOp5, "TestCrossEntropyF16Op5") +create_test_class(TestCrossEntropyOp6, "TestCrossEntropyF16Op6") +create_test_class(TestCrossEntropyOp7, "TestCrossEntropyF16Op7") if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dist_base.py b/python/paddle/fluid/tests/unittests/test_dist_base.py index 07814bc2571b380ec24c825615e3ef3d16e694be..45fae63b01e6a766202408d023e0292579c2857a 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_base.py @@ -37,10 +37,15 @@ class TestDistRunnerBase(object): "get_model should be implemented by child classes.") @staticmethod - def get_transpiler(trainer_id, main_program, pserver_endpoints, trainers, - sync_mode): + def get_transpiler(trainer_id, + main_program, + pserver_endpoints, + trainers, + sync_mode, + dc_asgd=False): # NOTE: import fluid until runtime, or else forking processes will cause error. 
config = fluid.DistributeTranspilerConfig() + config.enable_dc_asgd = dc_asgd t = fluid.DistributeTranspiler(config=config) t.transpile( trainer_id=trainer_id, @@ -55,7 +60,7 @@ class TestDistRunnerBase(object): # NOTE: pserver should not call memory optimize t = self.get_transpiler(args.trainer_id, fluid.default_main_program(), args.endpoints, - args.trainers, args.sync_mode) + args.trainers, args.sync_mode, args.dc_asgd) pserver_prog = t.get_pserver_program(args.current_endpoint) startup_prog = t.get_startup_program(args.current_endpoint, pserver_prog) @@ -75,8 +80,7 @@ class TestDistRunnerBase(object): t = self.get_transpiler(args.trainer_id, fluid.default_main_program(), args.endpoints, args.trainers, - args.sync_mode) - + args.sync_mode, args.dc_asgd) trainer_prog = t.get_trainer_program() else: trainer_prog = fluid.default_main_program() @@ -155,6 +159,7 @@ def runtime_main(test_class): parser.add_argument('--mem_opt', action='store_true') parser.add_argument('--use_cuda', action='store_true') parser.add_argument('--use_reduce', action='store_true') + parser.add_argument('--dc_asgd', action='store_true') parser.add_argument( '--use_reader_alloc', action='store_true', required=False) parser.add_argument('--batch_size', required=False, type=int, default=2) @@ -200,6 +205,7 @@ class TestDistBase(unittest.TestCase): self._enforce_place = None self._mem_opt = False self._use_reduce = False + self._dc_asgd = False # must use with async mode self._use_reader_alloc = True self._setup_config() self._after_setup_config() diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist.py b/python/paddle/fluid/tests/unittests/test_dist_mnist.py index 922dd838f8996adfc15afffcd44c1acca2bc14a9..81eb651878209164b3f339cc5030dbac847942d1 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist.py @@ -53,6 +53,15 @@ class TestDistMnistAsync(TestDistBase): self.check_with_place("dist_mnist.py", delta=200) +class TestDistMnistDcAsgd(TestDistBase): + def _setup_config(self): + self._sync_mode = False + self._dc_asgd = True + + def test_se_resnext(self): + self.check_with_place("dist_mnist.py", delta=200) + + # FIXME(typhoonzero): enable these tests once we have 4 # 4 GPUs on CI machine, and the base class should be updated. # diff --git a/python/paddle/fluid/tests/unittests/test_dist_save_load.py b/python/paddle/fluid/tests/unittests/test_dist_save_load.py new file mode 100644 index 0000000000000000000000000000000000000000..03066fee48b703f8b55bd4ae6a9c4bb8deecab1e --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_dist_save_load.py @@ -0,0 +1,90 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import print_function + +import os +import shutil +import unittest +import tempfile + +import numpy as np + +from test_dist_base import TestDistBase, RUN_STEP + + +class TestDistSaveLoadDense2x2(TestDistBase): + def _setup_config(self): + self._sync_mode = True + self._enforce_place = "CPU" + + def check_with_place(self, + model_file, + delta=1e-3, + check_error_log=False, + need_envs={}): + + required_envs = { + "PATH": os.getenv("PATH", ""), + "PYTHONPATH": os.getenv("PYTHONPATH", ""), + "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), + "http_proxy": "" + } + + required_envs.update(need_envs) + + if check_error_log: + required_envs["GLOG_v"] = "7" + required_envs["GLOG_logtostderr"] = "1" + + model_dir = tempfile.mkdtemp() + + local_env = {} + local_env["SAVE"] = "1" + local_env["MODEL_DIR"] = model_dir + local_env.update(required_envs) + + cluster_env = {} + cluster_env["LOAD"] = "1" + cluster_env["MODEL_DIR"] = model_dir + cluster_env.update(required_envs) + + local_var = self._run_local(model_file, local_env, check_error_log) + tr0_var, tr1_var = self._run_cluster(model_file, cluster_env, + check_error_log) + + shutil.rmtree(model_dir) + + local_np = np.array(eval(local_var[0])) + train0_np = np.array(eval(tr0_var[0])) + train1_np = np.array(eval(tr1_var[0])) + self.assertAlmostEqual(local_np.all(), train0_np.all(), delta=delta) + self.assertAlmostEqual(local_np.all(), train1_np.all(), delta=delta) + self.assertAlmostEqual(train0_np.all(), train1_np.all(), delta=delta) + + @unittest.skip(reason="CI fail") + def test_dist(self): + need_envs = { + "IS_DISTRIBUTED": '0', + "IS_SPARSE": '0', + 'IS_SELF_CONTAINED_LR': '1' + } + self.check_with_place( + "dist_save_load.py", + delta=0, + check_error_log=False, + need_envs=need_envs) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_grid_sampler_op.py b/python/paddle/fluid/tests/unittests/test_grid_sampler_op.py new file mode 100644 index 0000000000000000000000000000000000000000..c2529e0d70c9a359d2a44c671769d50a92650a73 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_grid_sampler_op.py @@ -0,0 +1,123 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import numpy as np +from op_test import OpTest + + +def AffineGrid(theta, size): + n = size[0] + h = size[2] + w = size[3] + h_idx = np.repeat( + np.linspace(-1, 1, h)[np.newaxis, :], w, axis=0).T[:, :, np.newaxis] + w_idx = np.repeat( + np.linspace(-1, 1, w)[np.newaxis, :], h, axis=0)[:, :, np.newaxis] + grid = np.concatenate( + [w_idx, h_idx, np.ones([h, w, 1])], axis=2) # h * w * 3 + grid = np.repeat(grid[np.newaxis, :], size[0], axis=0) # n * h * w *3 + + ret = np.zeros([n, h * w, 2]) + theta = theta.transpose([0, 2, 1]) + for i in range(len(theta)): + ret[i] = np.dot(grid[i].reshape([h * w, 3]), theta[i]) + + return ret.reshape([n, h, w, 2]).astype("float32") + + +def getGridPointValue(data, x, y): + data_shape = data.shape + N = data_shape[0] + H = data_shape[2] + W = data_shape[3] + + out = np.zeros(data_shape, dtype='float') + for i in range(N): + for j in range(H): + for k in range(W): + if y[i, j, k] < 0 or y[i, j, k] > H - 1 or x[i, j, k] < 0 or x[ + i, j, k] > W - 1: + out[i, :, j, k] = 0 + else: + out[i, :, j, k] = data[i, :, y[i, j, k], x[i, j, k]] + + return out + + +def GridSampler(data, grid): + dims = data.shape + N = dims[0] + C = dims[1] + H = dims[2] + W = dims[3] + + x = grid[:, :, :, 0] + y = grid[:, :, :, 1] + y_max = H - 1 + x_max = W - 1 + + x = 0.5 * ((x.astype('float32') + 1.0) * x_max) + y = 0.5 * ((y.astype('float32') + 1.0) * y_max) + + x0 = np.floor(x).astype('int32') + x1 = x0 + 1 + y0 = np.floor(y).astype('int32') + y1 = y0 + 1 + + wa = np.tile(((x1 - x) * (y1 - y)).reshape((N, 1, H, W)), (1, C, 1, 1)) + wb = np.tile(((x1 - x) * (y - y0)).reshape((N, 1, H, W)), (1, C, 1, 1)) + wc = np.tile(((x - x0) * (y1 - y)).reshape((N, 1, H, W)), (1, C, 1, 1)) + wd = np.tile(((x - x0) * (y - y0)).reshape((N, 1, H, W)), (1, C, 1, 1)) + + va = getGridPointValue(data, x0, y0) + vb = getGridPointValue(data, x0, y1) + vc = getGridPointValue(data, x1, y0) + vd = getGridPointValue(data, x1, y1) + + out = (wa * va + wb * vb + wc * vc + wd * vd).astype('float32') + return out + + +class TestGridSamplerOp(OpTest): + def setUp(self): + self.initTestCase() + self.op_type = 'grid_sampler' + x = np.random.randint(0, 255, self.x_shape).astype('float32') + + theta = np.zeros(self.theta_shape).astype('float32') + for i in range(self.theta_shape[0]): + for j in range(2): + for k in range(3): + theta[i, j, k] = np.random.rand(1)[0] + grid = AffineGrid(theta, self.x_shape) + + self.inputs = {'X': x, 'Grid': grid} + self.attrs = {'use_cudnn': True} + self.outputs = {'Output': GridSampler(x, grid)} + + def test_check_output(self): + self.check_output(atol=1e-3) + + def test_check_grad_normal(self): + self.check_grad(['X', 'Grid'], 'Output', max_relative_error=0.61) + + def initTestCase(self): + self.x_shape = (2, 5, 7, 3) + self.grid_shape = (2, 7, 3, 2) + self.theta_shape = (2, 2, 3) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 50de468dba803d0a2a0c129ad04aac8a3822cdbc..c4ecc2c2c2563fcad09821453ee73e41f81407d5 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -865,6 +865,31 @@ class TestBook(unittest.TestCase): self.assertIsNotNone(out) print(str(program)) + def test_grid_sampler(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[3, 5, 7], dtype='float32') + grid = layers.data(name='grid', shape=[5, 7, 2], dtype='float32') + out 
= layers.grid_sampler(x, grid) + self.assertIsNotNone(out) + print(str(program)) + + def test_affine_grid(self): + program = Program() + with program_guard(program): + data = layers.data(name='data', shape=[2, 3, 3], dtype="float32") + out, ids = layers.argsort(input=data, axis=1) + + theta = layers.data(name="theta", shape=[2, 3], dtype="float32") + out_shape = layers.data( + name="out_shape", shape=[-1], dtype="float32") + data_0 = layers.affine_grid(theta, out_shape) + data_1 = layers.affine_grid(theta, [5, 3, 28, 28]) + + self.assertIsNotNone(data_0) + self.assertIsNotNone(data_1) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py index ff338f0e0037307e81a92eed804096c9a2a87361..beae909e9b4c88eb7ddfbbe4e5ad2cf583a953ef 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_op.py +++ b/python/paddle/fluid/tests/unittests/test_mean_op.py @@ -17,14 +17,20 @@ from __future__ import print_function import unittest import numpy as np from op_test import OpTest +import paddle.fluid.core as core class TestMeanOp(OpTest): def setUp(self): self.op_type = "mean" - self.inputs = {'X': np.random.random((10, 10)).astype("float32")} + self.dtype = np.float32 + self.init_dtype_type() + self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)} self.outputs = {'Out': np.mean(self.inputs["X"])} + def init_dtype_type(self): + pass + def test_check_output(self): self.check_output() @@ -32,5 +38,23 @@ class TestMeanOp(OpTest): self.check_grad(['X'], 'Out') +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestFP16MeanOp(TestMeanOp): + def init_dtype_type(self): + self.dtype = np.float16 + + def test_check_output(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-3) + + def test_checkout_grad(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['X'], 'Out', max_relative_error=0.8) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_mul_op.py b/python/paddle/fluid/tests/unittests/test_mul_op.py index fca4ffa88b79ebfad009b436d440e86ddceaaed7..d54326714acf47bd5d2abd2d919b0e3b0cab3546 100644 --- a/python/paddle/fluid/tests/unittests/test_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_mul_op.py @@ -23,12 +23,17 @@ from op_test import OpTest class TestMulOp(OpTest): def setUp(self): self.op_type = "mul" + self.dtype = np.float32 + self.init_dtype_type() self.inputs = { - 'X': np.random.random((2, 5)).astype("float32"), - 'Y': np.random.random((5, 3)).astype("float32") + 'X': np.random.random((2, 5)).astype(self.dtype), + 'Y': np.random.random((5, 3)).astype(self.dtype) } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} + def init_dtype_type(self): + pass + def test_check_output(self): self.check_output() @@ -47,9 +52,11 @@ class TestMulOp(OpTest): class TestMulOp2(OpTest): def setUp(self): self.op_type = "mul" + self.dtype = np.float32 + self.init_dtype_type() self.inputs = { - 'X': np.random.random((3, 4, 4, 3)).astype("float32"), - 'Y': np.random.random((2, 6, 1, 2, 3)).astype("float32") + 'X': np.random.random((3, 4, 4, 3)).astype(self.dtype), + 'Y': np.random.random((2, 6, 1, 2, 3)).astype(self.dtype) } self.attrs = { 'x_num_col_dims': 2, @@ -60,6 +67,9 @@ class TestMulOp2(OpTest): result = result.reshape(3, 4, 1, 2, 3) 
self.outputs = {'Out': result} + def init_dtype_type(self): + pass + def test_check_output(self): self.check_output() @@ -75,40 +85,76 @@ class TestMulOp2(OpTest): ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) -class TestFP16MulOp1(OpTest): - def setUp(self): - self.op_type = "mul" - x = np.random.random((3, 5)).astype("float16") - y = np.random.random((5, 4)).astype("float16") - self.inputs = {'X': x.view(np.float16), 'Y': y.view(np.float16)} - self.outputs = {'Out': np.dot(x, y)} +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestFP16MulOp1(TestMulOp): + def init_dtype_type(self): + self.dtype = np.float16 def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-1) + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-1) + def test_check_grad_normal(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['X', 'Y'], 'Out', max_relative_error=0.5) -class TestFP16MulOp2(OpTest): - def setUp(self): - self.op_type = "mul" - x = np.random.random((3, 4, 4, 3)).astype("float16") - y = np.random.random((2, 6, 1, 2, 3)).astype("float16") - self.inputs = {'X': x.view(np.float16), 'Y': y.view(np.float16)} - self.attrs = { - 'x_num_col_dims': 2, - 'y_num_col_dims': 2, - } - result = np.dot(x.reshape(3 * 4, 4 * 3), y.reshape(2 * 6, 1 * 2 * 3)) - result = result.reshape(3, 4, 1, 2, 3) - self.outputs = {'Out': result} + def test_check_grad_ingore_x(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['Y'], + 'Out', + max_relative_error=0.5, + no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['X'], + 'Out', + max_relative_error=0.5, + no_grad_set=set('Y')) + + +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestFP16MulOp2(TestMulOp2): + def init_dtype_type(self): + self.dtype = np.float16 def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=2e-1) + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=2e-1) + + def test_check_grad_normal(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['X', 'Y'], 'Out', max_relative_error=0.9) + + def test_check_grad_ingore_x(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['Y'], + 'Out', + max_relative_error=0.5, + no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad_with_place( + place, ['X'], + 'Out', + max_relative_error=0.9, + no_grad_set=set('Y')) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py index 6d6917300cb66afcc8a0c509986a0f26be8b1f09..84b0aad8acb096a32f625e32fb640599f2882d97 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py @@ 
-16,6 +16,7 @@ from __future__ import print_function import paddle.dataset.conll05 as conll05 import paddle.fluid as fluid +import paddle.fluid.core as core import unittest import paddle import numpy as np @@ -174,39 +175,39 @@ class TestCRFModel(unittest.TestCase): print(pe.run(feed=feeder.feed(cur_batch), fetch_list=[avg_cost.name])[0]) - @unittest.skip(reason="CI hangs") def test_update_sparse_parameter_all_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce - self.check_network_convergence( - is_sparse=True, build_strategy=build_strategy, use_cuda=True) + if core.is_compiled_with_cuda(): + self.check_network_convergence( + is_sparse=True, build_strategy=build_strategy, use_cuda=True) self.check_network_convergence( is_sparse=True, build_strategy=build_strategy, use_cuda=False) - @unittest.skip(reason="CI hangs") def test_update_dense_parameter_all_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce - self.check_network_convergence( - is_sparse=False, build_strategy=build_strategy, use_cuda=True) + if core.is_compiled_with_cuda(): + self.check_network_convergence( + is_sparse=False, build_strategy=build_strategy, use_cuda=True) self.check_network_convergence( is_sparse=False, build_strategy=build_strategy, use_cuda=False) - @unittest.skip(reason="CI hangs") def test_update_sparse_parameter_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce - self.check_network_convergence( - is_sparse=True, build_strategy=build_strategy, use_cuda=True) + if core.is_compiled_with_cuda(): + self.check_network_convergence( + is_sparse=True, build_strategy=build_strategy, use_cuda=True) self.check_network_convergence( is_sparse=True, build_strategy=build_strategy, use_cuda=False) - @unittest.skip(reason="CI hangs") def test_update_dense_parameter_reduce(self): build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce - self.check_network_convergence( - is_sparse=False, build_strategy=build_strategy, use_cuda=True) + if core.is_compiled_with_cuda(): + self.check_network_convergence( + is_sparse=False, build_strategy=build_strategy, use_cuda=True) self.check_network_convergence( is_sparse=False, build_strategy=build_strategy, use_cuda=False) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py new file mode 100644 index 0000000000000000000000000000000000000000..c93740669f40aee3a6c143d153cfd0f5bb72dbd9 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_dry_run.py @@ -0,0 +1,80 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
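+#
+# This test drives ParallelExecutor with ExecutionStrategy._dry_run enabled,
+# across CPU/GPU and both the default and experimental executors: an
+# MNIST-style network is built once, then pe.run([]) is called repeatedly so
+# that op scheduling is exercised without feeding real data.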
+
+import paddle.fluid as fluid
+import unittest
+import logging
+import six
+
+
+class TestBase(unittest.TestCase):
+    def main(self,
+             network_func,
+             iter=100,
+             iter_per_pe=100,
+             use_gpu=True,
+             use_experimental_executor=False):
+        if use_gpu and not fluid.core.is_compiled_with_cuda():
+            logging.warning(
+                "Paddle is not compiled with CUDA, skip GPU unittests")
+            return
+
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
+        scope = fluid.Scope()
+        with fluid.program_guard(main_prog, startup_prog):
+            with fluid.scope_guard(scope):
+                loss = network_func()
+                fluid.Executor(
+                    fluid.CUDAPlace(0)
+                    if use_gpu else fluid.CPUPlace()).run(startup_prog)
+
+                for _ in six.moves.xrange(iter):
+                    exe_strategy = fluid.ExecutionStrategy()
+                    exe_strategy._dry_run = True
+                    exe_strategy.use_experimental_executor = use_experimental_executor
+                    pe = fluid.ParallelExecutor(
+                        use_cuda=use_gpu,
+                        loss_name=loss.name,
+                        main_program=main_prog,
+                        exec_strategy=exe_strategy)
+                    for _ in six.moves.xrange(iter_per_pe):
+                        pe.run([])
+
+
+class TestMNISTDryRun(TestBase):
+    def test_mnist_dry_run(self):
+        for use_gpu in (False, True):
+            for use_experimental_executor in (False, True):
+                self.main(
+                    network_func=TestMNISTDryRun.network_func,
+                    use_gpu=use_gpu,
+                    use_experimental_executor=use_experimental_executor)
+
+    @staticmethod
+    def network_func():
+        img = fluid.layers.data(name='img', shape=[784], dtype='float32')
+        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+        hidden = img
+        for _ in six.moves.xrange(10):
+            hidden = fluid.layers.fc(input=hidden, size=200, act='tanh')
+        prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
+        loss = fluid.layers.cross_entropy(input=prediction, label=label)
+        avg_loss = fluid.layers.mean(loss)
+        fluid.optimizer.Adam().minimize(avg_loss)
+        return avg_loss
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
index af3745987aa3eae96968bdc6b5c9cd951e9ca6fa..3eecc4670152e72443f731c71d7db67ca8e02e72 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py
@@ -14,30 +14,18 @@
 
 from __future__ import print_function
 
-from parallel_executor_test_base import TestParallelExecutorBase
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-import numpy as np
-import paddle
-import paddle.dataset.mnist as mnist
 import unittest
-import os
 
-MNIST_RECORDIO_FILE = "./mnist_test_pe.recordio"
+import numpy as np
+import paddle.fluid.core as core
+import os
+import paddle.fluid as fluid
+from parallel_executor_test_base import TestParallelExecutorBase
 
 
 def simple_fc_net(use_feed):
-    if use_feed:
-        img = fluid.layers.data(name='image', shape=[784], dtype='float32')
-        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-    else:
-        reader = fluid.layers.open_files(
-            filenames=[MNIST_RECORDIO_FILE],
-            shapes=[[-1, 784], [-1, 1]],
-            lod_levels=[0, 0],
-            dtypes=['float32', 'int64'])
-        reader = fluid.layers.io.double_buffer(reader)
-        img, label = fluid.layers.read_file(reader)
+    img = fluid.layers.data(name='image', shape=[784], dtype='float32')
+    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
     hidden = img
     for _ in range(4):
         hidden = fluid.layers.fc(
@@ -53,17 +41,8 @@ def simple_fc_net(use_feed):
 
 
 def fc_with_batchnorm(use_feed):
-    if use_feed:
-        img = fluid.layers.data(name='image',
shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - else: - reader = fluid.layers.open_files( - filenames=[MNIST_RECORDIO_FILE], - shapes=[[-1, 784], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64']) - reader = fluid.layers.io.double_buffer(reader) - img, label = fluid.layers.read_file(reader) + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') hidden = img for _ in range(1): @@ -88,19 +67,6 @@ class TestMNIST(TestParallelExecutorBase): @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) - # Convert mnist to recordio file - with fluid.program_guard(fluid.Program(), fluid.Program()): - reader = paddle.batch(mnist.train(), batch_size=4) - feeder = fluid.DataFeeder( - feed_list=[ # order is image and label - fluid.layers.data( - name='image', shape=[784]), - fluid.layers.data( - name='label', shape=[1], dtype='int64'), - ], - place=fluid.CPUPlace()) - fluid.recordio_writer.convert_reader_to_recordio_file( - MNIST_RECORDIO_FILE, reader, feeder) def _init_data(self): np.random.seed(5) @@ -111,10 +77,6 @@ class TestMNIST(TestParallelExecutorBase): def _compare_reduce_and_allreduce(self, model, use_cuda): if use_cuda and not core.is_compiled_with_cuda(): return - self.check_network_convergence( - model, use_cuda=use_cuda, use_reduce=True) - self.check_network_convergence( - model, use_cuda=use_cuda, allow_op_delay=True, use_reduce=True) img, label = self._init_data() @@ -140,9 +102,6 @@ class TestMNIST(TestParallelExecutorBase): def check_simple_fc_convergence(self, use_cuda, use_reduce=False): if use_cuda and not core.is_compiled_with_cuda(): return - self.check_network_convergence(simple_fc_net, use_cuda=use_cuda) - self.check_network_convergence( - simple_fc_net, use_cuda=use_cuda, allow_op_delay=True) img, label = self._init_data() @@ -199,8 +158,6 @@ class TestMNIST(TestParallelExecutorBase): if use_cuda and not core.is_compiled_with_cuda(): return - self.check_network_convergence(fc_with_batchnorm, use_cuda=use_cuda) - img, label = self._init_data() self.check_network_convergence( diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py index cc2d692e18430eb48e6e800106eab0c3739d3f53..e7a56bb6386a812e43e5c1b5c08cd0682aa9223a 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py @@ -232,6 +232,46 @@ class TestResnet(TestParallelExecutorBase): for loss in zip(all_reduce_last_loss, reduce_last_loss): self.assertAlmostEquals(loss[0], loss[1], delta=delta2) + if not use_cuda: + return + + all_reduce_first_loss_seq, all_reduce_last_loss_seq = self.check_network_convergence( + model, + feed_dict={"image": img, + "label": label}, + iter=iter, + batch_size=batch_size, + use_cuda=use_cuda, + use_reduce=False, + optimizer=optimizer, + enable_sequential_execution=True) + + reduce_first_loss_seq, reduce_last_loss_seq = self.check_network_convergence( + model, + feed_dict={"image": img, + "label": label}, + iter=iter, + batch_size=batch_size, + use_cuda=use_cuda, + use_reduce=True, + optimizer=optimizer, + enable_sequential_execution=True) + + for loss in zip(all_reduce_first_loss, all_reduce_first_loss_seq): + self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) + for loss in zip(all_reduce_last_loss, all_reduce_last_loss_seq): + 
self.assertAlmostEquals(loss[0], loss[1], delta=delta2) + + for loss in zip(reduce_first_loss, reduce_first_loss_seq): + self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) + for loss in zip(reduce_last_loss, reduce_last_loss_seq): + self.assertAlmostEquals(loss[0], loss[1], delta=delta2) + + for loss in zip(all_reduce_first_loss_seq, reduce_first_loss_seq): + self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) + for loss in zip(all_reduce_last_loss_seq, reduce_last_loss_seq): + self.assertAlmostEquals(loss[0], loss[1], delta=delta2) + def _check_resnet_convergence(self, model, use_cuda=True, diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py index a55b2002ed989d4588716202a37aa6f4139825ea..3827743908c1d76931572277323d1dd5ddd05523 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py @@ -173,6 +173,8 @@ class TestTransformer(TestParallelExecutorBase): def test_main(self): if core.is_compiled_with_cuda(): self.check_network_convergence(transformer, use_cuda=True) + self.check_network_convergence( + transformer, use_cuda=True, enable_sequential_execution=True) self.check_network_convergence(transformer, use_cuda=False, iter=5) diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py index 14d7ed9057d622b136056e1b5bbbe57f9a04d5d7..19f29c78269eef6414342a200b719e498c2037d2 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_mkldnn_op.py @@ -15,10 +15,10 @@ from __future__ import print_function import unittest -from test_pool2d_op import TestPool2d_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 +from test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 -class TestMKLDNNCase1(TestPool2d_Op): +class TestMKLDNNCase1(TestPool2D_Op): def init_kernel_type(self): self.use_mkldnn = True diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index 26969bd5230afdac83a943d2dc21094a0972d60a..47b2e71a4e52a327831fde7494bd7a2306b6f2ea 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -26,7 +26,8 @@ def max_pool2D_forward_naive(x, strides, paddings, global_pool=0, - ceil_mode=False): + ceil_mode=False, + exclusive=True): N, C, H, W = x.shape if global_pool == 1: ksize = [H, W] @@ -54,7 +55,8 @@ def avg_pool2D_forward_naive(x, strides, paddings, global_pool=0, - ceil_mode=False): + ceil_mode=False, + exclusive=True): N, C, H, W = x.shape if global_pool == 1: ksize = [H, W] @@ -73,12 +75,13 @@ def avg_pool2D_forward_naive(x, c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) x_masked = x[:, :, r_start:r_end, c_start:c_end] - out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / ( - (r_end - r_start) * (c_end - c_start)) + field_size = ((r_end - r_start) * (c_end - c_start)) if exclusive \ + else (ksize[0] * ksize[1]) + out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size return out -class TestPool2d_Op(OpTest): +class TestPool2D_Op(OpTest): def setUp(self): self.op_type = "pool2d" self.use_cudnn = False @@ -89,12 +92,13 @@ class TestPool2d_Op(OpTest): self.init_kernel_type() self.init_pool_type() self.init_ceil_mode() + 
self.init_exclusive() if self.global_pool: self.paddings = [0 for _ in range(len(self.paddings))] input = np.random.random(self.shape).astype(self.dtype) - output = self.pool2D_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool, - self.ceil_mode).astype(self.dtype) + output = self.pool2D_forward_naive( + input, self.ksize, self.strides, self.paddings, self.global_pool, + self.ceil_mode, self.exclusive).astype(self.dtype) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} self.attrs = { @@ -106,7 +110,9 @@ class TestPool2d_Op(OpTest): 'use_cudnn': self.use_cudnn, 'use_mkldnn': self.use_mkldnn, 'ceil_mode': self.ceil_mode, - 'data_format': 'AnyLayout' # TODO(dzhwinter) : should be fix latter + 'data_format': + 'AnyLayout', # TODO(dzhwinter) : should be fix latter + 'exclusive': self.exclusive } self.outputs = {'Out': output} @@ -150,8 +156,11 @@ class TestPool2d_Op(OpTest): def init_ceil_mode(self): self.ceil_mode = False + def init_exclusive(self): + self.exclusive = True -class TestCase1(TestPool2d_Op): + +class TestCase1(TestPool2D_Op): def init_test_case(self): self.shape = [2, 3, 7, 7] self.ksize = [3, 3] @@ -166,7 +175,7 @@ class TestCase1(TestPool2d_Op): self.global_pool = False -class TestCase2(TestPool2d_Op): +class TestCase2(TestPool2D_Op): def init_test_case(self): self.shape = [2, 3, 7, 7] self.ksize = [3, 3] @@ -181,7 +190,7 @@ class TestCase2(TestPool2d_Op): self.global_pool = False -class TestCase3(TestPool2d_Op): +class TestCase3(TestPool2D_Op): def init_pool_type(self): self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive @@ -199,127 +208,111 @@ class TestCase5(TestCase2): self.pool2D_forward_naive = max_pool2D_forward_naive -#--------------------test pool2d-------------------- -class TestCUDNNCase1(TestPool2d_Op): - def init_kernel_type(self): - self.use_cudnn = True - - -class TestFP16CUDNNCase1(TestPool2d_Op): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 - - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) +#--------------------test pool2d cudnn-------------------- -class TestCUDNNCase2(TestCase1): - def init_kernel_type(self): - self.use_cudnn = True +def create_test_cudnn_class(parent): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") + class TestCUDNNCase(parent): + def init_kernel_type(self): + self.use_cudnn = True + cls_name = "{0}_{1}".format(parent.__name__, "CUDNNOp") + TestCUDNNCase.__name__ = cls_name + globals()[cls_name] = TestCUDNNCase -class TestFP16CUDNNCase2(TestCase1): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) +create_test_cudnn_class(TestPool2D_Op) +create_test_cudnn_class(TestCase1) +create_test_cudnn_class(TestCase2) +create_test_cudnn_class(TestCase3) +create_test_cudnn_class(TestCase4) +create_test_cudnn_class(TestCase5) +#--------------------test pool2d cudnn_fp16-------------------- -class TestCUDNNCase3(TestCase2): - def init_kernel_type(self): - self.use_cudnn = True +def create_test_cudnn_fp16_class(parent, check_grad=True): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") + class TestCUDNNFp16Case(parent): + def 
init_kernel_type(self): + self.use_cudnn = True + self.dtype = np.float16 -class TestFP16CUDNNCase3(TestCase2): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, atol=1e-3) - def test_check_output(self): - if core.is_compiled_with_cuda(): + def test_check_grad(self): place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) - - -class TestCUDNNCase4(TestCase3): - def init_kernel_type(self): - self.use_cudnn = True - + if core.is_float16_supported( + place) and self.pool_type != "max" and check_grad: + self.check_grad_with_place( + place, set(['X']), 'Out', max_relative_error=0.07) -class TestFP16CUDNNCase4(TestCase3): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 + cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16Op") + TestCUDNNFp16Case.__name__ = cls_name + globals()[cls_name] = TestCUDNNFp16Case - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) +create_test_cudnn_fp16_class(TestPool2D_Op) +create_test_cudnn_fp16_class(TestCase1, check_grad=False) +create_test_cudnn_fp16_class(TestCase2) +create_test_cudnn_fp16_class(TestCase3) +create_test_cudnn_fp16_class(TestCase4) +create_test_cudnn_fp16_class(TestCase5) -class TestCUDNNCase5(TestCase4): - def init_kernel_type(self): - self.use_cudnn = True +#--------------------test pool2d use ceil mode-------------------- -class TestFP16CUDNNCase5(TestCase4): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 +def create_test_cudnn_use_ceil_class(parent): + @unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") + class TestPool2DUseCeilCase(parent): + def init_kernel_type(self): + self.use_cudnn = True - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) + def init_ceil_mode(self): + self.ceil_mode = True + cls_name = "{0}_{1}".format(parent.__name__, "CUDNNOpCeilMode") + TestPool2DUseCeilCase.__name__ = cls_name + globals()[cls_name] = TestPool2DUseCeilCase -class TestCUDNNCase6(TestCase5): - def init_kernel_type(self): - self.use_cudnn = True +create_test_cudnn_use_ceil_class(TestPool2D_Op) +create_test_cudnn_use_ceil_class(TestCase1) -class TestFP16CUDNNCase6(TestCase5): - def init_kernel_type(self): - self.use_cudnn = True - self.dtype = np.float16 - def test_check_output(self): - if core.is_compiled_with_cuda(): - place = core.CUDAPlace(0) - if core.is_float16_supported(place): - self.check_output_with_place(place, atol=1e-3) +def create_test_use_ceil_class(parent): + class TestPool2DUseCeilCase(parent): + def init_ceil_mode(self): + self.ceil_mode = True + cls_name = "{0}_{1}".format(parent.__name__, "CeilModeCast") + TestPool2DUseCeilCase.__name__ = cls_name + globals()[cls_name] = TestPool2DUseCeilCase -class TestCeilModeCase1(TestCUDNNCase1): - def init_ceil_mode(self): - self.ceil_mode = True +create_test_use_ceil_class(TestCase1) +create_test_use_ceil_class(TestCase2) -class TestCeilModeCase2(TestCUDNNCase2): - def init_ceil_mode(self): - self.ceil_mode = True +class TestAvgInclude(TestCase2): + def init_exclusive(self): + 
self.exclusive = False -class TestCeilModeCase3(TestCase1): - def init_ceil_mode(self): - self.ceil_mode = True +class TestCUDNNAvgInclude(TestCase2): + def init_kernel_type(self): + self.use_cudnn = True -class TestCeilModeCase4(TestCase2): - def init_ceil_mode(self): - self.ceil_mode = True + def init_exclusive(self): + self.exclusive = False if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_op.py b/python/paddle/fluid/tests/unittests/test_pool3d_op.py index 77045c1307baead3711d58ed368dfa5f2acc3699..f05f8ccb3985be162d89da099496d5b2baf4afdc 100644 --- a/python/paddle/fluid/tests/unittests/test_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_op.py @@ -26,7 +26,8 @@ def max_pool3D_forward_naive(x, strides, paddings, global_pool=0, - ceil_mode=False): + ceil_mode=False, + exclusive=True): N, C, D, H, W = x.shape if global_pool == 1: ksize = [D, H, W] @@ -60,7 +61,8 @@ def avg_pool3D_forward_naive(x, strides, paddings, global_pool=0, - ceil_mode=False): + ceil_mode=False, + exclusive=True): N, C, D, H, W = x.shape if global_pool == 1: ksize = [D, H, W] @@ -85,8 +87,10 @@ def avg_pool3D_forward_naive(x, w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] - out[:, :, k, i, j] = np.sum(x_masked, axis=(2, 3, 4)) / ( - (d_end - d_start) * (h_end - h_start) * (w_end - w_start)) + field_size = (d_end - d_start) * (h_end - h_start) * (w_end - w_start) \ + if exclusive else ksize[0] * ksize[1] * ksize[2] + out[:, :, k, i, j] = np.sum(x_masked, axis=(2, 3, + 4)) / field_size return out @@ -100,13 +104,14 @@ class TestPool3d_Op(OpTest): self.init_kernel_type() self.init_pool_type() self.init_ceil_mode() + self.init_exclusive() if self.global_pool: self.paddings = [0 for _ in range(len(self.paddings))] input = np.random.random(self.shape).astype(self.dtype) - output = self.pool3D_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool, - self.ceil_mode).astype(self.dtype) + output = self.pool3D_forward_naive( + input, self.ksize, self.strides, self.paddings, self.global_pool, + self.ceil_mode, self.exclusive).astype(self.dtype) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} self.attrs = { @@ -117,7 +122,9 @@ class TestPool3d_Op(OpTest): 'global_pooling': self.global_pool, 'use_cudnn': self.use_cudnn, 'ceil_mode': self.ceil_mode, - 'data_format': 'AnyLayout' # TODO(dzhwinter) : should be fix latter + 'data_format': + 'AnyLayout', # TODO(dzhwinter) : should be fix latter + 'exclusive': self.exclusive } self.outputs = {'Out': output} @@ -161,6 +168,9 @@ class TestPool3d_Op(OpTest): def init_ceil_mode(self): self.ceil_mode = False + def init_exclusive(self): + self.exclusive = True + class TestCase1(TestPool3d_Op): def init_test_case(self): @@ -333,5 +343,15 @@ class TestCeilModeCase4(TestCase2): self.ceil_mode = True +class TestAvgInclude(TestCase2): + def init_exclusive(self): + self.exclusive = False + + +class TestCUDNNAvgInclude(TestCUDNNCase3): + def init_exclusive(self): + self.exclusive = False + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_lod_level_share.py b/python/paddle/fluid/tests/unittests/test_py_reader_lod_level_share.py new file mode 100644 index 0000000000000000000000000000000000000000..55dc3a7aa341ff09eb3d7d219cd1c23427e25da1 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_py_reader_lod_level_share.py @@ -0,0 +1,43 @@ +# Copyright (c) 2018 
PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.fluid as fluid +import unittest + + +class TestLoDLevelShare(unittest.TestCase): + def setUp(self): + self.use_double_buffer = False + + def test_lod_level_share(self): + reader = fluid.layers.py_reader( + capacity=16, + shapes=([-1, 256], [-1, 512], [-1, 100]), + dtypes=('float32', 'int64', 'double'), + lod_levels=(1, 2, 0), + use_double_buffer=self.use_double_buffer) + + x, y, z = fluid.layers.read_file(reader) + self.assertEqual(x.lod_level, 1) + self.assertEqual(y.lod_level, 2) + self.assertEqual(z.lod_level, 0) + + +class TestLoDLevelShare2(TestLoDLevelShare): + def setUp(self): + self.use_double_buffer = True + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_pin_memory.py b/python/paddle/fluid/tests/unittests/test_py_reader_pin_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..b913127ad625eb25de3ec36edd2161019ed09749 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_py_reader_pin_memory.py @@ -0,0 +1,130 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
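+#
+# This test feeds batches as LoDTensors placed on CUDAPinnedPlace (CPUPlace
+# when Paddle is built without CUDA) through py_reader via
+# decorate_tensor_provider, then checks that every fetched batch matches the
+# batch that was fed in.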
+ +from __future__ import print_function + +import unittest +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +import numpy as np +from threading import Thread + + +def user_reader(inputs): + def _reader(): + for d in inputs: + yield d + + return _reader + + +def batch_feeder(batch_reader, pin_memory=False, img_dtype="float32"): + def _feeder(): + for batch_data in batch_reader(): + sample_batch = [] + label_batch = [] + for sample, label in batch_data: + sample_batch.append(sample) + label_batch.append([label]) + tensor = core.LoDTensor() + label = core.LoDTensor() + place = core.CUDAPinnedPlace() if pin_memory else core.CPUPlace() + tensor.set(np.array(sample_batch, dtype=img_dtype), place) + label.set(np.array(label_batch, dtype="int64"), place) + yield [tensor, label] + + return _feeder + + +class TestPyReader(unittest.TestCase): + def setUp(self): + self.capacity = 10 + self.shapes = [(-1, 3, 2, 1), (-1, 1)] + self.lod_levels = [0, 0] + self.dtypes = ['float32', 'int64'] + + def test_pin_memory_pyreader(self): + with fluid.program_guard(fluid.Program(), fluid.Program()): + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + executor = fluid.Executor(place) + + data_file = fluid.layers.py_reader( + capacity=self.capacity, + dtypes=self.dtypes, + lod_levels=self.lod_levels, + shapes=self.shapes) + # feed_queue = data_file.queue + read_out_data = fluid.layers.read_file(data_file) + + self.inputs = [] + for _ in range(10): + sample = np.random.uniform( + low=0, high=1, size=[3, 2, 1]).astype("float32") + label = np.random.uniform( + low=0, high=10, size=[1]).astype("int64") + self.inputs.append((sample, label)) + + self.input_tensors = [] + for d, l in batch_feeder( + paddle.batch( + user_reader(self.inputs), batch_size=2), + pin_memory=True + if fluid.core.is_compiled_with_cuda() else False)(): + ta = fluid.LoDTensorArray() + ta.append(d) + ta.append(l) + self.input_tensors.append(ta) + + self.batched_inputs = [] + for batch in paddle.batch(user_reader(self.inputs), batch_size=2)(): + feed_d = [] + feed_l = [] + for d, l in batch: + feed_d.append(d) + feed_l.append([l]) + self.batched_inputs.append([feed_d, feed_l]) + + data_file.decorate_tensor_provider( + batch_feeder( + paddle.batch( + user_reader(self.inputs), batch_size=2), + pin_memory=True + if fluid.core.is_compiled_with_cuda() else False)) + + executor.run(fluid.default_startup_program()) + self.outputs = [] + + data_file.start() + for _ in self.input_tensors: + self.outputs.append( + executor.run(fetch_list=list(read_out_data))) + data_file.reset() + self.validate() + + def validate(self): + self.assertEqual(len(self.batched_inputs), len(self.outputs)) + for in_data_list, out_data_list in zip(self.batched_inputs, + self.outputs): + self.assertEqual(len(in_data_list), len(out_data_list)) + in_data_list_np = [ + np.array(in_lod_tensor) for in_lod_tensor in in_data_list + ] + for in_data, out_data in zip(in_data_list_np, out_data_list): + self.assertTrue((in_data == out_data).all()) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py index b7fad9b3a60632adb564e1d155a3d935706b467f..d94494e219c5f348a08b4c3c2d111674ea6badf3 100644 --- a/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py +++ b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py @@ -53,15 +53,24 @@ def 
simple_fc_net(in_size, hidden_sizes, batch_size, queue_capacity, - use_double_buffer=False): - reader = fluid.layers.py_reader( - capacity=queue_capacity, - shapes=[[-1, in_size], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64'], - use_double_buffer=False) - feed_queue = reader.queue - reader = fluid.layers.batch(reader, batch_size=batch_size) + use_double_buffer=False, + use_feed_list=True): + if use_feed_list: + data = fluid.layers.data(name="data", dtype='float32', shape=[in_size]) + label = fluid.layers.data(name='label', dtype='int64', shape=[1]) + py_reader = fluid.layers.create_py_reader_by_data( + capacity=queue_capacity, + use_double_buffer=False, + feed_list=[data, label]) + else: + py_reader = fluid.layers.py_reader( + capacity=queue_capacity, + shapes=[[-1, in_size], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + use_double_buffer=False) + feed_queue = py_reader.queue + reader = fluid.layers.batch(py_reader, batch_size=batch_size) if use_double_buffer: reader = fluid.layers.double_buffer(reader) @@ -83,7 +92,7 @@ def simple_fc_net(in_size, optimizer = fluid.optimizer.Adam() optimizer.minimize(loss) - return in_data, label, loss, optimizer, feed_queue + return in_data, label, loss, optimizer, feed_queue, py_reader class TestPyReaderUsingExecutor(unittest.TestCase): @@ -100,16 +109,22 @@ class TestPyReaderUsingExecutor(unittest.TestCase): if core.is_compiled_with_cuda() else [False]): for use_parallel_executor in [False, True]: for use_double_buffer in [False, True]: - print('Test Parameters:'), - print({ - 'use_cuda': use_cuda, - 'use_parallel_executor': use_parallel_executor, - 'use_double_buffer': use_double_buffer - }) - self.main(use_cuda, use_parallel_executor, - use_double_buffer) - - def random_reader(self): + for use_feed_list in [False, True]: + for use_decorate_paddle_reader in [False, True]: + print('Test Parameters:'), + print({ + 'use_cuda': use_cuda, + 'use_parallel_executor': use_parallel_executor, + 'use_double_buffer': use_double_buffer, + 'use_feed_list': use_feed_list, + 'use_decorate_paddle_reader': + use_decorate_paddle_reader + }) + self.main(use_cuda, use_parallel_executor, + use_double_buffer, use_feed_list, + use_decorate_paddle_reader) + + def tensor_reader(self, use_decorate_paddle_reader): def reader(): self.inputs = [] cnt = 0 @@ -133,34 +148,43 @@ class TestPyReaderUsingExecutor(unittest.TestCase): elif not self.use_double_buffer: break - yield tensors + if use_decorate_paddle_reader: + yield [(in_data, label)] + else: + yield tensors cnt += 1 - yield None + if not use_decorate_paddle_reader: + yield None return reader def main(self, use_cuda=True, use_parallel_executor=False, - use_double_buffer=False): + use_double_buffer=False, + use_feed_list=False, + use_decorate_paddle_reader=False): assert not use_cuda or use_cuda and core.is_compiled_with_cuda() self.use_cuda = use_cuda self.use_parallel_executor = use_parallel_executor self.use_double_buffer = use_double_buffer + self.use_feed_list = use_feed_list + self.use_decorate_paddle_reader = use_decorate_paddle_reader startup_program = fluid.Program() main_program = fluid.Program() with fluid.program_guard(main_program, startup_program): - in_data, label, loss, optimizer, feed_queue = simple_fc_net( + in_data, label, loss, optimizer, feed_queue, py_reader = simple_fc_net( in_size=self.in_size, class_num=self.class_num, hidden_sizes=self.hidden_sizes, batch_size=self.batch_size, queue_capacity=self.queue_capacity, - use_double_buffer=self.use_double_buffer) + 
use_double_buffer=self.use_double_buffer, + use_feed_list=self.use_feed_list) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -178,10 +202,14 @@ class TestPyReaderUsingExecutor(unittest.TestCase): main_exe = startup_exe self.batch_size_times = 1 - reader = self.random_reader() - thread = threading.Thread( - target=feed_data, args=(feed_queue, reader)) - thread.start() + reader = self.tensor_reader(use_decorate_paddle_reader) + if use_decorate_paddle_reader: + py_reader.decorate_paddle_reader(reader) + py_reader.start() + else: + thread = threading.Thread( + target=feed_data, args=(feed_queue, reader)) + thread.start() self.outputs = [] for _ in range(self.iterations): diff --git a/python/paddle/fluid/tests/unittests/test_ref_by_trainer_id_op.py b/python/paddle/fluid/tests/unittests/test_ref_by_trainer_id_op.py new file mode 100644 index 0000000000000000000000000000000000000000..e4872829edb325edcadbd4e1aefaf5014b800d3a --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_ref_by_trainer_id_op.py @@ -0,0 +1,36 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestRefByTrainerIdOp(OpTest): + def setUp(self): + self.op_type = "ref_by_trainer_id" + param_baks = [("x%d" % x, np.random.random((10, 10)).astype("float32")) + for x in range(10)] + self.inputs = { + 'X': param_baks, + 'TrainerId': np.array([8]).astype("int64") + } + self.outputs = {'Out': param_baks[8][1]} + + def test_check_output(self): + self.check_output() + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_scale_op.py b/python/paddle/fluid/tests/unittests/test_scale_op.py index 032af6ed5ce9e1007d6775306ef4c0aefb9dcc41..9893c92ad68f4d460c4bb428bb44a30df25fd6e0 100644 --- a/python/paddle/fluid/tests/unittests/test_scale_op.py +++ b/python/paddle/fluid/tests/unittests/test_scale_op.py @@ -24,9 +24,16 @@ from paddle.fluid.op import Operator class TestScaleOp(OpTest): def setUp(self): self.op_type = "scale" - self.inputs = {'X': np.random.random((10, 10)).astype("float32")} + self.dtype = np.float32 + self.init_dtype_type() + self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)} self.attrs = {'scale': -2.3} - self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']} + self.outputs = { + 'Out': self.inputs['X'] * self.dtype(self.attrs['scale']) + } + + def init_dtype_type(self): + pass def test_check_output(self): self.check_output() @@ -36,9 +43,15 @@ class TestScaleOp(OpTest): class TestScaleOpSelectedRows(unittest.TestCase): + def init_dtype_type(self): + pass + def check_with_place(self, place, in_name, out_name): scope = core.Scope() + self.dtype = np.float32 + self.init_dtype_type() + # create and initialize Grad Variable in_height = 10 in_rows = [0, 4, 7] @@ -49,7 +62,7 @@ class TestScaleOpSelectedRows(unittest.TestCase): in_selected_rows.set_height(in_height) in_selected_rows.set_rows(in_rows) 
in_array = np.random.random(
-            (len(in_rows), in_row_numel)).astype("float32")
+            (len(in_rows), in_row_numel)).astype(self.dtype)
         in_tensor = in_selected_rows.get_tensor()
         in_tensor.set(in_array, place)
@@ -87,5 +100,41 @@ class TestScaleOpSelectedRows(unittest.TestCase):
         self.check_with_place(place, 'in', 'in')
 
 
+# Add FP16 test
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
+class TestScaleFp16Op(TestScaleOp):
+    def init_dtype_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        place = core.CUDAPlace(0)
+        if core.is_float16_supported(place):
+            self.check_output_with_place(place, atol=0.002)
+
+    def test_check_grad(self):
+        place = core.CUDAPlace(0)
+        if core.is_float16_supported(place):
+            self.check_grad_with_place(
+                place, ["X"], "Out", max_relative_error=0.05)
+
+
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
+class TestScaleFp16OpSelectedRows(TestScaleOpSelectedRows):
+    def init_dtype_type(self):
+        self.dtype = np.float16
+
+    def test_scale_selected_rows(self):
+        place = core.CUDAPlace(0)
+        if core.is_float16_supported(place):
+            self.check_with_place(place, 'in', 'out')
+
+    def test_scale_selected_rows_inplace(self):
+        place = core.CUDAPlace(0)
+        if core.is_float16_supported(place):
+            self.check_with_place(place, 'in', 'in')
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index d88aa1ae1c9d848eba7a2224d22b5201fc27b857..40c3135183a128cd9b7324ce27da798fa2d93afd 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -62,12 +62,11 @@ class TestSoftmaxOp(OpTest):
         self.check_output()
 
     def test_check_grad(self):
-        if self.dtype == np.float16:
-            return
-        if self.use_cudnn:
+        if self.use_cudnn or self.dtype == np.float16:
             place = core.CUDAPlace(0)
-            self.check_grad_with_place(
-                place, ["X"], "Out", max_relative_error=0.01)
+            if core.is_float16_supported(place):
+                self.check_grad_with_place(
+                    place, ["X"], "Out", max_relative_error=0.01)
         else:
             self.check_grad(["X"], "Out", max_relative_error=0.01)
 
@@ -103,10 +102,23 @@ class TestSoftmaxFP16Op(TestSoftmaxOp):
         if core.is_float16_supported(place):
             self.check_output_with_place(place, atol=1e-3)
 
+    # FIXME: if x_shape is [10, 10], the gradient check fails.
+    def test_check_grad(self):
+        pass
+
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
-class TestSoftmaxFP16Op2(TestSoftmaxFP16Op):
+class TestSoftmaxFP16Op2(TestSoftmaxOp):
+    def init_kernel_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=1e-3)
+
     def get_x_shape(self):
         return [2, 3, 4, 5]
 
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
index a18941dd3126ac027f022ddafbbaed8516166233..37ee880970cf7f6f235e7c43697b2b7872bed38b 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
@@ -26,7 +26,11 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
     Test softmax with cross entropy operator with discrete one-hot labels.
""" + def initParams(self): + self.numeric_stable_mode = False + def setUp(self): + self.initParams() self.op_type = "softmax_with_cross_entropy" batch_size = 41 class_num = 37 @@ -46,6 +50,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): "Softmax": softmax.astype("float64"), "Loss": cross_entropy.astype("float64") } + self.attrs = {"numeric_stable_mode": self.numeric_stable_mode} def test_check_output(self): self.check_output() @@ -54,6 +59,11 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): self.check_grad(["Logits"], "Loss") +class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp): + def initParams(self): + self.numeric_stable_mode = True + + class TestSoftmaxWithCrossEntropyOp2(OpTest): """ Test softmax with cross entropy operator with soft labels. @@ -93,7 +103,11 @@ class TestSoftmaxWithCrossEntropyOp3(OpTest): Test softmax with cross entropy operator with ignore_index. """ + def initParams(self): + self.numeric_stable_mode = False + def setUp(self): + self.initParams() self.op_type = "softmax_with_cross_entropy" batch_size = 41 class_num = 37 @@ -114,7 +128,10 @@ class TestSoftmaxWithCrossEntropyOp3(OpTest): "Softmax": softmax.astype("float64"), "Loss": cross_entropy.astype("float64") } - self.attrs = {"ignore_index": ignore_index} + self.attrs = { + "ignore_index": ignore_index, + "numeric_stable_mode": self.numeric_stable_mode + } def test_check_output(self): self.check_output() @@ -123,5 +140,10 @@ class TestSoftmaxWithCrossEntropyOp3(OpTest): self.check_grad(["Logits"], "Loss") +class TestSoftmaxWithCrossEntropyOp3NoCudnn(TestSoftmaxWithCrossEntropyOp3): + def initParams(self): + self.numeric_stable_mode = True + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index e20418ff1c8d21f3a3e4ba15ff2aa9d54f37f4b2..643878dc5c2c2854ad3a1b6429d78519b1670857 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -24,16 +24,20 @@ from paddle.fluid.op import Operator class TestSumOp(OpTest): def setUp(self): self.op_type = "sum" + self.init_kernel_type() self.use_mkldnn = False self.init_kernel_type() - x0 = np.random.random((3, 4)).astype('float32') - x1 = np.random.random((3, 4)).astype('float32') - x2 = np.random.random((3, 4)).astype('float32') + x0 = np.random.random((3, 4)).astype(self.dtype) + x1 = np.random.random((3, 4)).astype(self.dtype) + x2 = np.random.random((3, 4)).astype(self.dtype) self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]} y = x0 + x1 + x2 self.outputs = {'Out': y} self.attrs = {'use_mkldnn': self.use_mkldnn} + def init_kernel_type(self): + self.dtype = np.float32 + def test_check_output(self): self.check_output() @@ -59,8 +63,11 @@ class TestSelectedRowsSumOp(OpTest): self.check_input_and_optput(core.Scope(), place, inplace, False, False, False) + def init_kernel_type(self): + self.dtype = np.float32 + def _get_array(self, row_num, row_numel): - array = np.ones((row_num, row_numel)).astype("float32") + array = np.ones((row_num, row_numel)).astype(self.dtype) for i in range(row_num): array[i] *= i return array @@ -129,5 +136,36 @@ class TestSelectedRowsSumOp(OpTest): self.check_with_place(place, inplace) +class TestFP16SumOp(TestSumOp): + def init_kernel_type(self): + self.dtype = np.float16 + + def test_check_output(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_output_with_place(place, 
atol=2e-2) + + # FIXME: Because of the precision fp16, max_relative_error + # should be 0.15 here. + def test_check_grad(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + self.check_grad(['x0'], 'Out', max_relative_error=0.15) + + +class TestFP16SelectedRowsSumOp(TestSelectedRowsSumOp): + def init_kernel_type(self): + self.dtype = np.float16 + + def test_w_is_selected_rows(self): + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + if core.is_float16_supported(place): + for inplace in [True, False]: + self.check_with_place(place, inplace) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 4af13b605fa7054df097e3bf0ed0c71f468f02de..7c7fba76718e911907bb9bef69b3e8688bbf52fc 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -38,7 +38,7 @@ import six import logging from .ps_dispatcher import RoundRobin, HashName, PSDispatcher -from .. import core, framework +from .. import core, framework, unique_name from ..framework import Program, default_main_program, \ default_startup_program, Block, \ Parameter, grad_var_name @@ -138,6 +138,7 @@ class DistributeTranspilerConfig(object): slice_var_up = True split_method = None min_block_size = 8192 + enable_dc_asgd = False # supported modes: pserver, nccl2 mode = "pserver" print_log = False @@ -252,6 +253,8 @@ class DistributeTranspiler(object): n workers, the id may range from 0 ~ n-1 program (Program|None): program to transpile, default is fluid.default_main_program(). + startup_program (Program|None): startup_program to transpile, + default is fluid.default_startup_program(). pservers (str): comma separated ip:port string for the pserver list. trainers (int|str): in pserver mode this is the number of @@ -383,6 +386,8 @@ class DistributeTranspiler(object): outputs={"Out": send_barrier_out}, attrs={ "endpoints": pserver_endpoints, + "sync_mode": self.sync_mode, + "trainer_id": self.trainer_id, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) @@ -426,6 +431,7 @@ class DistributeTranspiler(object): outputs={"Out": splited_var}, attrs={ "epmap": eps, + "trainer_id": self.trainer_id, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, OP_ROLE_VAR_ATTR_NAME: [param_varname, recv_op_role_var_name], @@ -440,6 +446,7 @@ class DistributeTranspiler(object): outputs={"Out": all_recv_outputs}, attrs={ "endpoints": pserver_endpoints, + "trainer_id": self.trainer_id, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) @@ -651,6 +658,24 @@ in a single call.") endpoint, op): opt_op_on_pserver.append(op) # step 3.3 + # prepare if dc asgd is enabled + if self.config.enable_dc_asgd == True: + assert (self.sync_mode == False) + self.param_bak_list = [] + # add param_bak for each trainer + for p in self.param_grad_ep_mapping[endpoint]["params"]: + # each parameter should have w_bak for each trainer id + for i in range(self.trainer_num): + param_bak_name = "%s.trainer_%d_bak" % (p.name, i) + tmpvar = pserver_program.global_block().create_var( + # NOTE: this var name format is used in `request_get_handler` + name=param_bak_name, + type=p.type, + shape=p.shape, + dtype=p.dtype) + self.param_bak_list.append((p, tmpvar)) + + # step 3.4 # Iterate through the ops, and if an op and the optimize ops # which located on current pserver are in one set, then # append it into the sub program. 
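Note on the DC-ASGD pieces above and below: each trainer gets a `param_bak` snapshot of every parameter on the pserver, and `_append_dc_asgd_ops` (added in a later hunk) rebuilds the delay-compensated gradient out of elementwise ops. In plain numpy, the op chain it emits reduces to the sketch below; the function name is illustrative only, and the lambda scaling factor is absent here because the patch itself still marks it as a TODO.

import numpy as np

def compensated_grad(grad, param, param_bak):
    # elementwise chain from _append_dc_asgd_ops:
    #   o1 = param - param_bak   (elementwise_sub)
    #   o2 = o1 * grad           (elementwise_mul)
    #   o3 = o2 * grad           (elementwise_mul)
    #   o4 = grad + o3           (elementwise_add)
    return grad + (param - param_bak) * grad * grad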
@@ -741,7 +766,7 @@ in a single call.") grad_to_block_id, merged_var, lr_ops) - # dedup grad to ids list +# dedup grad to ids list grad_to_block_id = list(set(grad_to_block_id)) # append global ops if global_ops: @@ -787,6 +812,8 @@ in a single call.") if self.has_distributed_lookup_table: attrs['checkpint_block_id'] = checkpoint_block_id + if self.config.enable_dc_asgd: + attrs['dc_asgd'] = True if len(prefetch_var_name_to_block_id) > 0: attrs[ @@ -903,6 +930,15 @@ to transpile() call.") inputs=new_inputs, outputs=new_outputs, attrs=op.all_attrs()) + if self.config.enable_dc_asgd: + for p, p_bak in self.param_bak_list: + startup_param_var = s_prog.global_block().vars[p.name] + startup_tmpvar = s_prog.global_block().vars[p_bak.name] + # copy init random value to param_bak + s_prog.global_block().append_op( + type="assign", + inputs={"X": startup_param_var}, + outputs={"Out": startup_tmpvar}) # add slice vars s_prog._slice_vars_and_attrs = self._get_slice_vars_and_attrs(endpoint) @@ -920,11 +956,11 @@ to transpile() call.") block_idx = int(block_name.split(block_suffix)[1]) orig_var = self.origin_program.global_block().vars[orig_var_name] - skip_numel = 0 + skip_dim0 = 0 slice_vars = self.param_var_mapping[orig_var_name] for slice_var in slice_vars[:block_idx]: - skip_numel += reduce(lambda x, y: x * y, slice_var.shape) - slice_vars_and_attrs.append([orig_var, skip_numel, param]) + skip_dim0 += slice_var.shape[0] + slice_vars_and_attrs.append([orig_var, skip_dim0, param]) return slice_vars_and_attrs @@ -1175,6 +1211,7 @@ to transpile() call.") attrs={ "sync_mode": not self.sync_mode, "epmap": pserver_endpoints, + "trainer_id": self.trainer_id, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE, OP_ROLE_VAR_ATTR_NAME: [ self.grad_name_to_param_name[table_grad_name], @@ -1531,6 +1568,68 @@ to transpile() call.") attrs={"scale": 1.0 / float(self.trainer_num)}) return merged_var + def _append_dc_asgd_ops(self, block, param_var, grad_var): + # NOTE: can not use grammar candy here, should put ops in specific block + local_param_bak = block.create_var( + name="%s.local_bak" % param_var.name, + shape=param_var.shape, + type=param_var.type, + dtype=param_var.dtype, + persistable=False) + # trainer_id_var is block local + trainer_id_var = block.create_var( + name="@TRAINER_ID@", + type=core.VarDesc.VarType.LOD_TENSOR, + dtype=core.VarDesc.VarType.INT64, + shape=[1], + persistable=False) + + # ref_inputs = [x[1] for x in self.param_bak_list] + ref_inputs = [] + for p, p_bak in self.param_bak_list: + if p.name == param_var.name: + ref_inputs.append(p_bak) + block.append_op( + type="ref_by_trainer_id", + inputs={"X": ref_inputs, + "TrainerId": trainer_id_var}, + outputs={"Out": local_param_bak}) + + def __create_temp_var__(): + return block.create_var( + name=unique_name.generate("tmp_dc_output"), + shape=param_var.shape, + type=param_var.type, + dtype=param_var.dtype, + persistable=False) + + o1 = __create_temp_var__() + block.append_op( + type="elementwise_sub", + inputs={"X": param_var, + "Y": local_param_bak}, + outputs={"Out": o1}) + o2 = __create_temp_var__() + block.append_op( + type="elementwise_mul", + inputs={"X": o1, + "Y": grad_var}, + outputs={"Out": o2}) + o3 = __create_temp_var__() + block.append_op( + type="elementwise_mul", + inputs={"X": o2, + "Y": grad_var}, + outputs={"Out": o3}) + # TODO(typhoonzero): append scale + o4 = __create_temp_var__() + block.append_op( + type="elementwise_add", + inputs={"X": grad_var, + "Y": o3}, + outputs={"Out": o4}) + return o4 + def 
_append_pserver_ops(self, optimize_block, opt_op, endpoint,
                             grad_to_block_id, origin_program, merged_var):
         program = optimize_block.program
@@ -1546,9 +1645,16 @@ to transpile() call.")
                     break
             return param_block
 
+        if self.config.enable_dc_asgd:
+            param_var = _get_param_block(opt_op)
+            dc = self._append_dc_asgd_ops(optimize_block, param_var, merged_var)
+
         for key in opt_op.input_names:
             if key == "Grad":
-                new_inputs[key] = merged_var
+                if self.config.enable_dc_asgd:
+                    new_inputs[key] = dc
+                else:
+                    new_inputs[key] = merged_var
             elif key == "Param":
                 param_block = _get_param_block(opt_op)
                 if not param_block:
diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py
index 5269bd94cec47a5262e2389c5b02f91edd5a7d17..9a13cecc646e8534a157fad882fd97836348deb4 100644
--- a/python/paddle/fluid/transpiler/inference_transpiler.py
+++ b/python/paddle/fluid/transpiler/inference_transpiler.py
@@ -61,6 +61,9 @@
             raise TypeError("scope should be as Scope type or None")
         use_mkldnn = bool(os.getenv("FLAGS_use_mkldnn", False))
 
+        if use_mkldnn:
+            self._depthwise_conv_mkldnn(program)
+
         self._fuse_batch_norm(program, place, scope)
         if use_mkldnn:
             self._fuse_conv_bias_mkldnn(program)
@@ -70,6 +73,31 @@
                 program)  # ResNet residual block merging
             self._fuse_bn_relu_mkldnn(program)
 
+    def _depthwise_conv_mkldnn(self, program):
+        '''
+        Transpile the program by replacing depthwise_conv2d with conv2d
+        for MKLDNN programs.
+        The result is:
+            - before:
+                - any_other_op->depthwise_conv->any_other_op
+            - after:
+                - any_other_op->conv->any_other_op
+        :param program: program to transpile
+        :type program: Program
+        '''
+        self.block = program.block(0)
+
+        i = 0
+        while i < len(self.block.ops):
+            current_op = self.block.ops[i]
+            if current_op.type == 'depthwise_conv2d':
+                current_op.desc.set_type("conv2d")
+            i = i + 1
+
+        # TODO(luotao): use the clone() method to flush program.desc by force,
+        # since some large program.desc will not be flushed immediately.
+        # And a better solution will be considered later.
+ program = program.clone() + def _fuse_conv_eltwise_mkldnn(self, program): ''' Transpile the program fusing elementwise_add into conv for MKLDNN diff --git a/python/setup.py.in b/python/setup.py.in index ce65d0003fb7132b8b51f3e3eae7d43a51a0bde8..2a311d319b6a48536245ff0d26179f88506fb544 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -14,7 +14,8 @@ ext_name = '.dll' if os.name == 'nt' else '.so' def git_commit(): try: cmd = ['git', 'rev-parse', 'HEAD'] - git_commit = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip() + git_commit = subprocess.Popen(cmd, stdout = subprocess.PIPE, + cwd="@PADDLE_SOURCE_DIR@").communicate()[0].strip() except: git_commit = 'Unknown' git_commit = git_commit.decode() @@ -27,7 +28,7 @@ def _get_version_detail(idx): if re.match('@TAG_VERSION_REGEX@', '@PADDLE_VERSION@'): version_details = '@PADDLE_VERSION@'.split('.') - if len(version_details) == 3: + if len(version_details) >= 3: return version_details[idx] return 0 @@ -44,7 +45,7 @@ def get_patch(): def is_taged(): try: cmd = ['git', 'describe', '--exact-match', '--tags', 'HEAD', '2>/dev/null'] - git_tag = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip() + git_tag = subprocess.Popen(cmd, stdout = subprocess.PIPE, cwd="@PADDLE_SOURCE_DIR@").communicate()[0].strip() git_tag = git_tag.decode() except: return False @@ -55,8 +56,7 @@ def is_taged(): return False def write_version_py(filename='paddle/version.py'): - cnt = ''' -# THIS FILE IS GENERATED FROM PADDLEPADDLE SETUP.PY + cnt = '''# THIS FILE IS GENERATED FROM PADDLEPADDLE SETUP.PY # full_version = '%(major)d.%(minor)d.%(patch)s' major = '%(major)d'