From ab1097cd8e83f26f2efb1a8de6011cab000c6041 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Fri, 31 Aug 2018 19:02:35 +0800
Subject: [PATCH] Feature/template (#13093)

* remove template operator

* "fix compile"

* "fix ci"

* "fix ci"
---
 .../fluid/framework/data_layout_transform.cc  |  2 +-
 paddle/fluid/framework/data_type.h            | 53 ++++---------
 paddle/fluid/framework/data_type_transform.cc |  2 +-
 .../framework/details/reduce_and_gather.h     |  2 +-
 paddle/fluid/framework/selected_rows.cc       |  2 +-
 paddle/fluid/framework/tensor_util.cc         |  4 +-
 .../fluid/operators/beam_search_decode_op.cc  |  6 +--
 paddle/fluid/operators/cast_op.h              |  2 +-
 .../detection/generate_proposals_op.cc        |  2 +-
 paddle/fluid/operators/fill_op.cc             |  2 +-
 paddle/fluid/operators/math/math_function.cc  |  2 +-
 paddle/fluid/operators/math/math_function.cu  |  2 +-
 paddle/fluid/operators/one_hot_op.cu          |  2 +-
 paddle/fluid/operators/one_hot_op.h           |  2 +-
 paddle/fluid/operators/sequence_mask_op.h     |  2 +-
 15 files changed, 26 insertions(+), 61 deletions(-)

diff --git a/paddle/fluid/framework/data_layout_transform.cc b/paddle/fluid/framework/data_layout_transform.cc
index cd00b7de73..c9e3a8ac1d 100644
--- a/paddle/fluid/framework/data_layout_transform.cc
+++ b/paddle/fluid/framework/data_layout_transform.cc
@@ -46,7 +46,7 @@ struct CastDataLayout {
   const std::vector<int> axis_;
 
   template <typename T>
-  void operator()() {
+  void apply() {
     auto place = ctx_->GetPlace();
 
     if (platform::is_cpu_place(place)) {
diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h
index 84691a2059..8ad2fb5f3f 100644
--- a/paddle/fluid/framework/data_type.h
+++ b/paddle/fluid/framework/data_type.h
@@ -26,75 +26,40 @@ namespace framework {
 extern proto::VarType::Type ToDataType(std::type_index type);
 extern std::type_index ToTypeIndex(proto::VarType::Type type);
 
-#if !defined(_WIN32)
 template <typename Visitor>
 inline void VisitDataType(proto::VarType::Type type, Visitor visitor) {
   switch (type) {
     case proto::VarType::FP16:
-      visitor.template operator()<platform::float16>();
+      visitor.template apply<platform::float16>();
       break;
     case proto::VarType::FP32:
-      visitor.template operator()<float>();
+      visitor.template apply<float>();
       break;
     case proto::VarType::FP64:
-      visitor.template operator()<double>();
+      visitor.template apply<double>();
       break;
     case proto::VarType::INT32:
-      visitor.template operator()<int>();
+      visitor.template apply<int>();
       break;
     case proto::VarType::INT64:
-      visitor.template operator()<int64_t>();
+      visitor.template apply<int64_t>();
       break;
     case proto::VarType::BOOL:
-      visitor.template operator()<bool>();
+      visitor.template apply<bool>();
       break;
     case proto::VarType::UINT8:
-      visitor.template operator()<uint8_t>();
+      visitor.template apply<uint8_t>();
       break;
     case proto::VarType::INT16:
-      visitor.template operator()<int16_t>();
+      visitor.template apply<int16_t>();
       break;
     case proto::VarType::INT8:
-      visitor.template operator()<int8_t>();
+      visitor.template apply<int8_t>();
       break;
     default:
       PADDLE_THROW("Not supported %d", type);
   }
 }
-#else
-// the msvc compiler do not implement two-stage name lookup correctly.
-template <typename Visitor>
-inline void VisitDataType(proto::VarType::Type type, Visitor visitor) {
-  switch (type) {
-    case proto::VarType::FP16:
-      visitor.operator()<platform::float16>();
-      break;
-    case proto::VarType::FP32:
-      visitor.operator()<float>();
-      break;
-    case proto::VarType::FP64:
-      visitor.operator()<double>();
-      break;
-    case proto::VarType::INT32:
-      visitor.operator()<int>();
-      break;
-    case proto::VarType::INT64:
-      visitor.operator()<int64_t>();
-      break;
-    case proto::VarType::BOOL:
-      visitor.operator()<bool>();
-      break;
-    case proto::VarType::UINT8:
-      visitor.operator()<uint8_t>();
-      break;
-    case proto::VarType::INT16:
-      visitor.operator()<int16_t>();
-      break;
-    default:
-      PADDLE_THROW("Not supported %d", type);
-  }
-}
-#endif  // _WIN32
 
 extern std::string DataTypeToString(const proto::VarType::Type type);
 extern size_t SizeOfType(std::type_index type);
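[Note] The data_type.h hunk above is the heart of the patch: when the visitor's type is a template parameter, a call through a templated operator() must be spelled `visitor.template operator()<T>()`, a form old MSVC could not parse because of its incomplete two-phase name lookup, which is why the header previously carried a second `#ifdef`-selected copy of VisitDataType. Renaming the member to `apply` keeps the `template` disambiguator but gives every compiler the same spelling. A minimal self-contained sketch of the pattern; apart from `apply`, the names and the integer type tag are illustrative, not Paddle's:

    // sketch.cc -- build with: g++ -std=c++11 sketch.cc
    #include <cstdio>

    // Visitor with a *named* member template instead of operator().
    struct PrintSizeVisitor {
      template <typename T>
      void apply() const {
        std::printf("sizeof(T) = %zu\n", sizeof(T));
      }
    };

    // 'visitor' has a dependent type here, so naming a member template in
    // the call requires the 'template' keyword on a conforming compiler.
    template <typename Visitor>
    void VisitOneType(int type_tag, Visitor visitor) {
      switch (type_tag) {
        case 0:
          visitor.template apply<float>();  // was: visitor.template operator()<float>();
          break;
        case 1:
          visitor.template apply<double>();
          break;
        default:
          break;
      }
    }

    int main() {
      VisitOneType(0, PrintSizeVisitor());  // prints: sizeof(T) = 4
      VisitOneType(1, PrintSizeVisitor());  // prints: sizeof(T) = 8
      return 0;
    }

GCC and Clang accept both spellings; the rename simply removes the construct MSVC choked on, so a single definition can serve all platforms.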
diff --git a/paddle/fluid/framework/data_type_transform.cc b/paddle/fluid/framework/data_type_transform.cc
index 5a57ec2058..d79f8cacb5 100644
--- a/paddle/fluid/framework/data_type_transform.cc
+++ b/paddle/fluid/framework/data_type_transform.cc
@@ -37,7 +37,7 @@ struct CastDataType {
   const platform::DeviceContext* ctx_;
 
   template <typename OutType>
-  void operator()() {
+  void apply() {
     auto* in_begin = in_.data<InType>();
     auto* in_end = in_begin + in_.numel();
     auto* out_begin = out_->mutable_data<OutType>(in_.place());
diff --git a/paddle/fluid/framework/details/reduce_and_gather.h b/paddle/fluid/framework/details/reduce_and_gather.h
index e28264eb32..bd6153c0c7 100644
--- a/paddle/fluid/framework/details/reduce_and_gather.h
+++ b/paddle/fluid/framework/details/reduce_and_gather.h
@@ -31,7 +31,7 @@ struct ReduceLoDTensor {
       : src_tensors_(src), dst_tensor_(*dst) {}
 
   template <typename T>
-  void operator()() const {
+  void apply() const {
     PADDLE_ENFORCE(!src_tensors_.empty());
     auto &t0 = *src_tensors_[0];
     PADDLE_ENFORCE_NE(t0.numel(), 0);
diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc
index a4319ffabb..8c290bb095 100644
--- a/paddle/fluid/framework/selected_rows.cc
+++ b/paddle/fluid/framework/selected_rows.cc
@@ -49,7 +49,7 @@ struct TensorCopyVisitor {
         size_(size) {}
 
   template <typename T>
-  void operator()() const {
+  void apply() const {
     // TODO(Yancey1989): support other place
     platform::CPUPlace cpu;
     memory::Copy(cpu, dst_->mutable_data<T>(cpu) + dst_offset_, cpu,
diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc
index ab693004cf..05c4a17a01 100644
--- a/paddle/fluid/framework/tensor_util.cc
+++ b/paddle/fluid/framework/tensor_util.cc
@@ -149,7 +149,7 @@ struct AnyDTypeVisitor {
       : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {}
 
   template <typename T>
-  void operator()() const {
+  void apply() const {
     auto t = EigenVector<T>::Flatten(tensor_);
     auto o = EigenScalar<bool>::From(*out_);
     // return any of predicate_(t) is true.
@@ -302,7 +302,7 @@ struct DeserializedDataFunctor {
       : buf_(buf), tensor_(tensor), place_(place) {}
 
   template <typename T>
-  void operator()() {
+  void apply() {
     *buf_ = tensor_->mutable_data<T>(place_);
   }
 
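[Note] CastDataType above shows the shape most of these functors share: the struct is templated on the statically known input type, while the runtime-selected output type arrives through `apply<OutType>()` when the dispatcher switches on the proto dtype. A compilable sketch of that two-level pattern; `CastSketch` and its members are illustrative stand-ins, not Paddle's functor:

    // cast_sketch.cc -- build with: g++ -std=c++11 cast_sketch.cc
    #include <cstdio>

    // Input type is a class template parameter; the output type is chosen
    // at runtime by the dispatcher and delivered via apply<OutType>().
    template <typename InType>
    struct CastSketch {
      const InType* in_begin;
      const InType* in_end;
      void* out;  // type-erased destination, refined inside apply()

      template <typename OutType>
      void apply() const {
        OutType* dst = static_cast<OutType*>(out);
        for (const InType* p = in_begin; p != in_end; ++p, ++dst) {
          *dst = static_cast<OutType>(*p);
        }
      }
    };

    int main() {
      float src[3] = {1.5f, 2.5f, 3.5f};
      int dst[3] = {0, 0, 0};
      CastSketch<float> cast{src, src + 3, dst};
      cast.apply<int>();  // in Paddle this call is made inside VisitDataType
      std::printf("%d %d %d\n", dst[0], dst[1], dst[2]);  // prints: 1 2 3
      return 0;
    }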
diff --git a/paddle/fluid/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc
index 10d678111f..b6cb935814 100644
--- a/paddle/fluid/operators/beam_search_decode_op.cc
+++ b/paddle/fluid/operators/beam_search_decode_op.cc
@@ -74,7 +74,7 @@ struct BeamSearchDecodeFunctor {
   }
 
   template <typename T>
-  void operator()() const;
+  void apply() const;
 
   bool tensor_on_gpu_;
   size_t beam_size_;
@@ -88,7 +88,7 @@ struct BeamSearchDecodeFunctor {
 };
 
 template <typename T>
-void BeamSearchDecodeFunctor::operator()() const {
+void BeamSearchDecodeFunctor::apply() const {
   BeamSearchDecoder<T> beam_search_decoder(beam_size_, end_id_);
   // Check if the tensor is on GPU. If so, use the CPU copy instead
   if (tensor_on_gpu_) {
@@ -101,7 +101,7 @@ void BeamSearchDecodeFunctor::operator()() const {
 }
 
 template <>
-void BeamSearchDecodeFunctor::operator()<bool>() const {
+void BeamSearchDecodeFunctor::apply<bool>() const {
   PADDLE_THROW("beam search decode op does not support bool!");
 }
 
diff --git a/paddle/fluid/operators/cast_op.h b/paddle/fluid/operators/cast_op.h
index 6220e57f59..8fa0416049 100644
--- a/paddle/fluid/operators/cast_op.h
+++ b/paddle/fluid/operators/cast_op.h
@@ -37,7 +37,7 @@ struct CastOpFunctor {
      : in_(in), out_(out), ctx_(ctx) {}
 
   template <typename OutT>
-  void operator()() const {
+  void apply() const {
     auto* in_begin = in_->data<InT>();
     auto numel = in_->numel();
     auto* in_end = in_begin + numel;
diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cc b/paddle/fluid/operators/detection/generate_proposals_op.cc
index d29b015338..fcdcafae72 100644
--- a/paddle/fluid/operators/detection/generate_proposals_op.cc
+++ b/paddle/fluid/operators/detection/generate_proposals_op.cc
@@ -33,7 +33,7 @@ struct AppendProposalsFunctor {
       : out_(out), offset_(offset), to_add_(to_add) {}
 
   template <typename T>
-  void operator()() const {
+  void apply() const {
     auto *out_data = out_->data<T>();
     auto *to_add_data = to_add_->data<T>();
     memcpy(out_data + offset_, to_add_data, to_add_->numel() * sizeof(T));
diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc
index 925dc19061..adc7cb1f9e 100644
--- a/paddle/fluid/operators/fill_op.cc
+++ b/paddle/fluid/operators/fill_op.cc
@@ -25,7 +25,7 @@ struct FillOpVisitor {
       : tensor_(tensor), value_(value) {}
 
   template <typename T>
-  void operator()() const {
+  void apply() const {
     platform::CPUPlace cpu;
     auto *data = tensor_->mutable_data<T>(cpu);
     std::transform(value_.data(), value_.data() + tensor_->numel(), data,
diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc
index 9a6e646b28..5923792902 100644
--- a/paddle/fluid/operators/math/math_function.cc
+++ b/paddle/fluid/operators/math/math_function.cc
@@ -55,7 +55,7 @@ struct TensorSetConstantCPU {
   TensorSetConstantCPU(framework::Tensor* tensor, float value)
       : tensor_(tensor), value_(value) {}
   template <typename T>
-  void operator()() const {
+  void apply() const {
     auto cpu = platform::CPUPlace();
     auto* begin = tensor_->mutable_data<T>(cpu);
     std::fill(begin, begin + tensor_->numel(), static_cast<T>(value_));
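[Note] The beam_search_decode hunks above also show why `apply` is only declared in-class and defined at namespace scope: a full specialization of a member template, here the one that rejects bool, must appear outside the class body. A compilable sketch of that pattern; `DecodeSketch` is an illustrative stand-in, and the throw stands in for PADDLE_THROW:

    // decode_sketch.cc -- build with: g++ -std=c++11 decode_sketch.cc
    #include <cstdio>
    #include <stdexcept>

    struct DecodeSketch {
      template <typename T>
      void apply() const;  // declared here, defined below
    };

    // Primary definition, out of class, mirroring BeamSearchDecodeFunctor::apply.
    template <typename T>
    void DecodeSketch::apply() const {
      std::printf("decoding with %zu-byte elements\n", sizeof(T));
    }

    // Full specialization rejecting a type the op cannot handle; it must
    // live at namespace scope, not inside the struct.
    template <>
    void DecodeSketch::apply<bool>() const {
      throw std::runtime_error("decode does not support bool!");
    }

    int main() {
      DecodeSketch f;
      f.apply<float>();  // ok
      try {
        f.apply<bool>();  // throws, like the PADDLE_THROW above
      } catch (const std::exception& e) {
        std::printf("%s\n", e.what());
      }
      return 0;
    }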
diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu
index 12d1baa8fb..79b7538ad0 100644
--- a/paddle/fluid/operators/math/math_function.cu
+++ b/paddle/fluid/operators/math/math_function.cu
@@ -52,7 +52,7 @@ struct TensorSetConstantGPU {
       : context_(context), tensor_(tensor), value_(value) {}
 
   template <typename T>
-  void operator()() const {
+  void apply() const {
     SetConstant<platform::CUDADeviceContext, T> functor;
     functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
             tensor_, static_cast<T>(value_));
diff --git a/paddle/fluid/operators/one_hot_op.cu b/paddle/fluid/operators/one_hot_op.cu
index 625065692c..59d8b9b8a8 100644
--- a/paddle/fluid/operators/one_hot_op.cu
+++ b/paddle/fluid/operators/one_hot_op.cu
@@ -41,7 +41,7 @@ struct OneHotOpCUDAFunctor {
      : in_(in), out_(out), depth_(depth), ctx_(ctx) {}
 
   template <typename OutT>
-  void operator()() const {
+  void apply() const {
     auto* p_in_data = in_->data<InT>();
     auto numel = in_->numel();
     auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace());
diff --git a/paddle/fluid/operators/one_hot_op.h b/paddle/fluid/operators/one_hot_op.h
index 7e77f25089..1ebd267649 100644
--- a/paddle/fluid/operators/one_hot_op.h
+++ b/paddle/fluid/operators/one_hot_op.h
@@ -31,7 +31,7 @@ struct OneHotOpFunctor {
       : in_(in), out_(out), depth_(depth), ctx_(ctx) {}
 
   template <typename OutT>
-  void operator()() const {
+  void apply() const {
     auto* p_in_data = in_->data<InT>();
     auto numel = in_->numel();
     auto* p_out_data = out_->mutable_data<OutT>(ctx_.GetPlace());
diff --git a/paddle/fluid/operators/sequence_mask_op.h b/paddle/fluid/operators/sequence_mask_op.h
index 0dd554adfe..18acb735ce 100644
--- a/paddle/fluid/operators/sequence_mask_op.h
+++ b/paddle/fluid/operators/sequence_mask_op.h
@@ -99,7 +99,7 @@ struct SequenceMaskFunctor {
       : ctx_(ctx), x_(x), y_(y), limits_(limits), maxlen_(maxlen) {}
 
   template <typename Ty>
-  void operator()() const {
+  void apply() const {
     auto *y_data = y_->mutable_data<Ty>(ctx_.GetPlace());
     platform::ForRange<DeviceContext> for_range(ctx_, limits_);
     for_range(SequenceMaskForRangeFunctor<Tx, Ty>(x_, y_data, maxlen_));
--
GitLab
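[Note] Call sites are unaffected by the rename apart from the member name itself; the dispatcher still owns the single switch over runtime dtypes. A compilable end-to-end sketch of how an op-side functor in the spirit of FillOpVisitor or TensorSetConstantCPU plugs into a VisitDataType-style dispatcher; the enum values, buffer handling, and all names other than `apply` are illustrative:

    // fill_sketch.cc -- build with: g++ -std=c++11 fill_sketch.cc
    #include <cstdio>
    #include <vector>

    enum class VarType { FP32, INT32 };

    // Stand-in for a fill-style functor: writes one element of the
    // runtime-selected type T into a raw byte buffer.
    struct FillSketch {
      std::vector<unsigned char>* raw;
      double value;

      template <typename T>
      void apply() const {
        raw->assign(sizeof(T), 0);
        *reinterpret_cast<T*>(raw->data()) = static_cast<T>(value);
        std::printf("filled one %zu-byte element\n", sizeof(T));
      }
    };

    // Stand-in for framework::VisitDataType: one switch maps the runtime
    // dtype tag to a compile-time type and forwards it to the visitor.
    template <typename Visitor>
    void VisitDataType(VarType type, Visitor visitor) {
      switch (type) {
        case VarType::FP32:
          visitor.template apply<float>();
          break;
        case VarType::INT32:
          visitor.template apply<int>();
          break;
        default:
          break;
      }
    }

    int main() {
      std::vector<unsigned char> buf;
      VisitDataType(VarType::FP32, FillSketch{&buf, 3.25});  // 4-byte element
      VisitDataType(VarType::INT32, FillSketch{&buf, 7});    // 4-byte element
      return 0;
    }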