From 6641a314224b3114c97c118a217201812ca2fea7 Mon Sep 17 00:00:00 2001 From: hjchen2 Date: Fri, 15 Feb 2019 20:48:51 +0800 Subject: [PATCH] Delivery input lod to output for elementwise_add/top_k/activation ops --- src/operators/activation_op.cpp | 11 +- src/operators/elementwise_add_op.cpp | 1 + .../kernel/arm/beam_search_decode_kernel.cpp | 253 +++++++- .../kernel/arm/conv_add_bn_relu_kernel.cpp | 4 +- .../kernel/arm/conv_bn_add_relu_kernel.cpp | 4 +- .../kernel/arm/conv_bn_relu_kernel.cpp | 4 +- src/operators/kernel/arm/conv_kernel.cpp | 2 +- .../kernel/arm/dwconv_bn_relu_kernel.cpp | 4 +- .../kernel/arm/sequence_softmax_kernel.cpp | 10 +- src/operators/op_param.h | 582 +++++++++--------- src/operators/softmax_op.cpp | 1 + src/operators/top_k_op.cpp | 6 +- 12 files changed, 557 insertions(+), 325 deletions(-) diff --git a/src/operators/activation_op.cpp b/src/operators/activation_op.cpp index 76c9e1a014..afe806a651 100644 --- a/src/operators/activation_op.cpp +++ b/src/operators/activation_op.cpp @@ -17,11 +17,12 @@ limitations under the License. */ namespace paddle_mobile { namespace operators { -#define DEFINE_ACTIVATION_INFERSHAPE(OpName) \ - template \ - void OpName##Op::InferShape() const { \ - const auto &input_dims = this->param_.InputX()->dims(); \ - this->param_.Out()->Resize(input_dims); \ +#define DEFINE_ACTIVATION_INFERSHAPE(OpName) \ + template \ + void OpName##Op::InferShape() const { \ + const auto &input_dims = this->param_.InputX()->dims(); \ + this->param_.Out()->Resize(input_dims); \ + this->param_.Out()->set_lod(this->param_.InputX()->lod()); \ } #ifdef RELU_OP diff --git a/src/operators/elementwise_add_op.cpp b/src/operators/elementwise_add_op.cpp index 281cd3d508..6fde477f22 100644 --- a/src/operators/elementwise_add_op.cpp +++ b/src/operators/elementwise_add_op.cpp @@ -23,6 +23,7 @@ template void ElementwiseAddOp::InferShape() const { auto x_dim = this->param_.InputX()->dims(); this->param_.Out()->Resize(x_dim); + this->param_.Out()->set_lod(this->param_.InputX()->lod()); } } // namespace operators diff --git a/src/operators/kernel/arm/beam_search_decode_kernel.cpp b/src/operators/kernel/arm/beam_search_decode_kernel.cpp index 343b731e35..a241bc62c7 100644 --- a/src/operators/kernel/arm/beam_search_decode_kernel.cpp +++ b/src/operators/kernel/arm/beam_search_decode_kernel.cpp @@ -15,27 +15,260 @@ limitations under the License. */ #ifdef BEAM_SEARCH_DECODE_OP #include "operators/kernel/beam_search_decode_kernel.h" +#include "framework/data_type.h" namespace paddle_mobile { namespace operators { +using LoDTensor = framework::LoDTensor; +using LoDTensorArray = framework::LoDTensorArray; + +// all the lod have 2 levels. +// The first is source level, the second is sentence level. +// source level describe how many prefixes (branchs) for each source sentece +// (beam). sentence level describe how these candidates belong to the prefixes. +const size_t kSourceLevel = 0; +const size_t kSentenceLevel = 1; + +template +struct Sentence { + std::vector word_ids; + std::vector scores; +}; + +template +using SentenceVector = std::vector>; + +template +struct BeamSearchDecoder { + BeamSearchDecoder(size_t beam_size, int end_id) + : beam_size_(beam_size), end_id_(end_id) {} + + /** + * convert the result sentence_vector for each source sentence into two + * LodTensor. + * One is all candidate sentences with word id, one is all candidate sentences + * with word score. + * Param: + * sentence_vector_list: sentence_vector for each source sentence. 
+ * id_tensor: result LoDTensor for sentences of id. + * score_tensor: result LoDTensor for sentences of score. + * reverse: whether ids of sentence in sentence_vector_list is reversed + * sort_by_score: whether to sort hypotheses of each sentence by scores. + */ + void ConvertSentenceVectorToLodTensor( + std::vector> sentence_vector_list, LoDTensor* id_tensor, + LoDTensor* score_tensor, bool reverse = true, + bool sort_by_score = true) const; + + /** + * Gather the hypotheses for each source sentence by backtrace though the + * LoDTensorArray step_ids whose lods reserve the path in the tree. + */ + void Backtrace(const LoDTensorArray& step_ids, + const LoDTensorArray& step_scores, LoDTensor* id_tensor, + LoDTensor* score_tensor) const; + + size_t beam_size_; + int end_id_; +}; + +template +void BeamSearchDecoder::ConvertSentenceVectorToLodTensor( + std::vector> sentence_vector_list, LoDTensor* id_tensor, + LoDTensor* score_tensor, bool reverse, bool sort_by_score) const { + size_t src_num = sentence_vector_list.size(); + + PADDLE_MOBILE_ENFORCE(src_num > 0, "src_num should be larger than 0"); + + std::vector source_level_lod = {0}; + std::vector sentence_level_lod = {0}; + std::vector id_data; + std::vector score_data; + + for (size_t src_idx = 0; src_idx < src_num; ++src_idx) { + if (sort_by_score) { + sort(sentence_vector_list[src_idx].begin(), + sentence_vector_list[src_idx].end(), + [reverse](const Sentence& a, const Sentence& b) { + if (reverse) + return a.scores.front() > b.scores.front(); + else + return a.scores.back() > b.scores.back(); + }); + } + for (Sentence& sentence : sentence_vector_list[src_idx]) { + if (reverse) { + id_data.insert(id_data.end(), sentence.word_ids.rbegin(), + sentence.word_ids.rend()); + score_data.insert(score_data.end(), sentence.scores.rbegin(), + sentence.scores.rend()); + } else { + id_data.insert(id_data.end(), sentence.word_ids.begin(), + sentence.word_ids.end()); + score_data.insert(score_data.end(), sentence.scores.begin(), + sentence.scores.end()); + } + + sentence_level_lod.push_back(sentence_level_lod.back() + + sentence.word_ids.size()); + } + source_level_lod.push_back(source_level_lod.back() + + sentence_vector_list[src_idx].size()); + } + + framework::LoD lod; + lod.push_back(source_level_lod); + lod.push_back(sentence_level_lod); + + id_tensor->set_lod(lod); + id_tensor->Resize({static_cast(id_data.size())}); + id_tensor->mutable_data(); + // framework::TensorFromVector(id_data, cpu_ctx, id_tensor); + + score_tensor->set_lod(lod); + score_tensor->Resize({static_cast(score_data.size())}); + score_tensor->mutable_data(); + // framework::TensorFromVector(score_data, cpu_ctx, score_tensor); +} + +template +void BeamSearchDecoder::Backtrace(const LoDTensorArray& step_ids, + const LoDTensorArray& step_scores, + LoDTensor* id_tensor, + LoDTensor* score_tensor) const { + PADDLE_MOBILE_ENFORCE(!step_ids.empty(), "step num should be larger than 0"); + PADDLE_MOBILE_ENFORCE(step_ids.size() == step_scores.size(), + "step_ids and step_scores should be the same"); + const size_t step_num = step_ids.size(); + const size_t src_num = step_ids.at(0).lod().at(kSourceLevel).size() - 1; + std::vector> sentence_vector_list( + src_num, SentenceVector(beam_size_)); + std::vector> prefix_idx_vector_list(src_num); + for (int step_id = step_num - 1; step_id >= 0; --step_id) { + auto& cur_ids = step_ids.at(step_id); + auto& cur_scores = step_scores.at(step_id); + for (size_t src_idx = 0; src_idx < src_num; ++src_idx) { + // for each source sentence + auto& 
sentence_vector = sentence_vector_list.at(src_idx); + auto& prefix_idx_vector = prefix_idx_vector_list.at(src_idx); + size_t src_prefix_start = cur_ids.lod().at(kSourceLevel)[src_idx]; + size_t src_prefix_end = cur_ids.lod().at(kSourceLevel)[src_idx + 1]; + if (prefix_idx_vector.empty()) { // be finished and pruned at this step + // or the last time step + for (size_t prefix_idx = src_prefix_start; prefix_idx < src_prefix_end; + ++prefix_idx) { + size_t candidate_start = cur_ids.lod().at(kSentenceLevel)[prefix_idx]; + size_t candidate_end = + cur_ids.lod().at(kSentenceLevel)[prefix_idx + 1]; + for (size_t candidate_idx = candidate_start; + candidate_idx < candidate_end; ++candidate_idx) { + prefix_idx_vector.push_back(prefix_idx); + size_t idx = prefix_idx_vector.size() - 1; + auto cur_id = cur_ids.data()[candidate_idx]; + auto cur_score = cur_scores.data()[candidate_idx]; + sentence_vector.at(idx).word_ids.push_back(cur_id); + sentence_vector.at(idx).scores.push_back(cur_score); + } + } + } else { // use prefix_idx_vector to backtrace + size_t src_candidate_start = + cur_ids.lod().at(kSentenceLevel)[src_prefix_start]; + size_t prefix_idx = src_prefix_start; + size_t candidate_num = + cur_ids.lod().at(kSentenceLevel)[prefix_idx + 1] - + cur_ids.lod().at(kSentenceLevel)[prefix_idx]; + for (size_t idx = 0; idx < prefix_idx_vector.size(); ++idx) { + auto candidate_idx = prefix_idx_vector.at(idx); + auto cur_id = cur_ids.data()[candidate_idx]; + auto cur_score = cur_scores.data()[candidate_idx]; + if (cur_id != end_id_ || sentence_vector.at(idx).word_ids.empty()) { + // to skip redundant end tokens + sentence_vector.at(idx).word_ids.push_back(cur_id); + sentence_vector.at(idx).scores.push_back(cur_score); + } + + while (src_candidate_start + candidate_num <= + candidate_idx) { // search the corresponding prefix + prefix_idx++; + candidate_num += cur_ids.lod().at(kSentenceLevel)[prefix_idx + 1] - + cur_ids.lod().at(kSentenceLevel)[prefix_idx]; + } + prefix_idx_vector.at(idx) = prefix_idx; + } + } + } + } + + ConvertSentenceVectorToLodTensor(sentence_vector_list, id_tensor, + score_tensor, true, true); +} + +struct BeamSearchDecodeFunctor { + BeamSearchDecodeFunctor(const LoDTensorArray& step_ids, + const LoDTensorArray& step_scores, + LoDTensor* id_tensor, LoDTensor* score_tensor, + size_t beam_size, int end_id) + : beam_size_(beam_size), + end_id_(end_id), + step_ids_(step_ids), + step_scores_(step_scores), + id_tensor_(id_tensor), + score_tensor_(score_tensor) {} + + template + void apply() const; + + size_t beam_size_; + int end_id_; + const LoDTensorArray& step_ids_; + const LoDTensorArray& step_scores_; + LoDTensor* id_tensor_; + LoDTensor* score_tensor_; +}; + +template +void BeamSearchDecodeFunctor::apply() const { + BeamSearchDecoder beam_search_decoder(beam_size_, end_id_); + beam_search_decoder.Backtrace(step_ids_, step_scores_, id_tensor_, + score_tensor_); +} + +template <> +void BeamSearchDecodeFunctor::apply() const { + PADDLE_MOBILE_THROW_EXCEPTION("beam search decode op does not support bool."); +} + template <> bool BeamSearchDecodeKernel::Init( - BeamSearchDecodeParam *param) { + BeamSearchDecodeParam* param) { return true; } template <> void BeamSearchDecodeKernel::Compute( - const BeamSearchDecodeParam ¶m) { - // TODO(hjchen2) - DLOG << "BeamSearchDecodeKernel"; - param.sentence_scores_->Resize(framework::make_ddim({10})); - param.sentence_scores_->mutable_data(); - DLOG << "BeamSearchDecodeKernel"; - - param.sentence_ids_->Resize(framework::make_ddim({10})); - 
param.sentence_ids_->mutable_data(); + const BeamSearchDecodeParam& param) { + const LoDTensorArray* ids = param.ids_; + const LoDTensorArray* scores = param.scores_; + + const size_t step_num = ids->size(); + PADDLE_MOBILE_ENFORCE(step_num > 0, + "beam search steps should be larger than 0"); + + for (size_t i = 0; i < step_num; ++i) { + PADDLE_MOBILE_ENFORCE(ids->at(i).lod().size() == 2, + "Level of LodTensor should be 2"); + } + const size_t source_num = ids->at(0).lod().at(0).size() - 1; + PADDLE_MOBILE_ENFORCE(source_num > 0, "source num should be larger than 0"); + + LoDTensor* sentence_ids = param.sentence_ids_; + LoDTensor* sentence_scores = param.sentence_scores_; + + framework::VisitDataType( + framework::ToDataType(scores->at(0).type()), + BeamSearchDecodeFunctor(*ids, *scores, sentence_ids, sentence_scores, + param.beam_size_, param.end_id_)); } } // namespace operators diff --git a/src/operators/kernel/arm/conv_add_bn_relu_kernel.cpp b/src/operators/kernel/arm/conv_add_bn_relu_kernel.cpp index 1e67f9cc60..635aac4dff 100644 --- a/src/operators/kernel/arm/conv_add_bn_relu_kernel.cpp +++ b/src/operators/kernel/arm/conv_add_bn_relu_kernel.cpp @@ -41,8 +41,8 @@ bool ConvAddBNReluKernel::Init( inv_std_ptr[i] = 1 / static_cast(pow((variance_ptr[i] + epsilon), 0.5)); } - Tensor *new_scale = new Tensor(); - Tensor *new_bias = new Tensor(); + LoDTensor *new_scale = new LoDTensor(); + LoDTensor *new_bias = new LoDTensor(); auto new_scale_ptr = new_scale->mutable_data({C}); auto new_bias_ptr = new_bias->mutable_data({C}); for (int i = 0; i < C; i++) { diff --git a/src/operators/kernel/arm/conv_bn_add_relu_kernel.cpp b/src/operators/kernel/arm/conv_bn_add_relu_kernel.cpp index 17c9fbd315..fb5cbb68e6 100644 --- a/src/operators/kernel/arm/conv_bn_add_relu_kernel.cpp +++ b/src/operators/kernel/arm/conv_bn_add_relu_kernel.cpp @@ -41,8 +41,8 @@ bool ConvBNAddReluKernel::Init( inv_std_ptr[i] = 1 / static_cast(pow((variance_ptr[i] + epsilon), 0.5)); } - Tensor *new_scale = new Tensor(); - Tensor *new_bias = new Tensor(); + LoDTensor *new_scale = new LoDTensor(); + LoDTensor *new_bias = new LoDTensor(); auto new_scale_ptr = new_scale->mutable_data({C}); auto new_bias_ptr = new_bias->mutable_data({C}); for (int i = 0; i < C; i++) { diff --git a/src/operators/kernel/arm/conv_bn_relu_kernel.cpp b/src/operators/kernel/arm/conv_bn_relu_kernel.cpp index 44c7f463f4..bac91ff273 100644 --- a/src/operators/kernel/arm/conv_bn_relu_kernel.cpp +++ b/src/operators/kernel/arm/conv_bn_relu_kernel.cpp @@ -42,8 +42,8 @@ bool ConvBNReluKernel::Init(FusionConvBNReluParam *param) { inv_std_ptr[i] = 1 / static_cast(pow((variance_ptr[i] + epsilon), 0.5)); } - Tensor *new_scale = new Tensor(); - Tensor *new_bias = new Tensor(); + LoDTensor *new_scale = new LoDTensor(); + LoDTensor *new_bias = new LoDTensor(); auto new_scale_ptr = new_scale->mutable_data({C}); auto new_bias_ptr = new_bias->mutable_data({C}); for (int i = 0; i < C; i++) { diff --git a/src/operators/kernel/arm/conv_kernel.cpp b/src/operators/kernel/arm/conv_kernel.cpp index f7f55b790d..0fafe210a5 100644 --- a/src/operators/kernel/arm/conv_kernel.cpp +++ b/src/operators/kernel/arm/conv_kernel.cpp @@ -69,7 +69,7 @@ bool ConvKernel::Init(ConvParam *param) { param->Input()->dims()[2] <= 140 /* refered from ncnn */) { param->ExecMode() = ConvParam::EXEC_WINOGRAD3X3_FLOAT; // transform weight - param->transformed_filter_ = new framework::Tensor; + param->transformed_filter_ = new framework::LoDTensor; operators::math::winograd_transform_weight<8, 3>( 
*param->Filter(), param->transformed_filter_); #endif diff --git a/src/operators/kernel/arm/dwconv_bn_relu_kernel.cpp b/src/operators/kernel/arm/dwconv_bn_relu_kernel.cpp index 60d3530294..38dd6ae181 100644 --- a/src/operators/kernel/arm/dwconv_bn_relu_kernel.cpp +++ b/src/operators/kernel/arm/dwconv_bn_relu_kernel.cpp @@ -40,8 +40,8 @@ bool DWConvBNReluKernel::Init(FusionDWConvBNReluParam *param) { inv_std_ptr[i] = 1 / static_cast(pow((variance_ptr[i] + epsilon), 0.5)); } - Tensor *new_scale = new Tensor(); - Tensor *new_bias = new Tensor(); + LoDTensor *new_scale = new LoDTensor(); + LoDTensor *new_bias = new LoDTensor(); auto new_scale_ptr = new_scale->mutable_data({C}); auto new_bias_ptr = new_bias->mutable_data({C}); for (int i = 0; i < C; i++) { diff --git a/src/operators/kernel/arm/sequence_softmax_kernel.cpp b/src/operators/kernel/arm/sequence_softmax_kernel.cpp index 4fb79888cd..b0df21fac5 100644 --- a/src/operators/kernel/arm/sequence_softmax_kernel.cpp +++ b/src/operators/kernel/arm/sequence_softmax_kernel.cpp @@ -29,12 +29,10 @@ class SequenceSoftmaxKernel void Compute(const SoftmaxParam ¶m) { param.Out()->mutable_data(); - /* - const framework::LoDTensor *input = param.InputX(); - framework::LoDTensor *output = param.Out(); - math::SequenceSoftmaxFuntor sequence_softmax; - sequence_softmax(input, output); - */ + const framework::LoDTensor *input = param.InputX(); + framework::LoDTensor *output = param.Out(); + math::SequenceSoftmaxFuntor sequence_softmax; + sequence_softmax(input, output); } }; diff --git a/src/operators/op_param.h b/src/operators/op_param.h index bb6c20200f..29abcf4b61 100644 --- a/src/operators/op_param.h +++ b/src/operators/op_param.h @@ -426,11 +426,11 @@ class ConvParam : public OpParam { groups = OpParam::GetAttr("groups", attrs); } - const RType *Input() const { return input_; } + const GType *Input() const { return input_; } - RType *Filter() const { return filter_; } + GType *Filter() const { return filter_; } - RType *Output() const { return output_; } + GType *Output() const { return output_; } const vector &Strides() const { return strides_; } @@ -465,10 +465,10 @@ class ConvParam : public OpParam { #endif public: - RType *input_; - RType *output_; - RType *filter_; - RType *transformed_filter_; + GType *input_; + GType *output_; + GType *filter_; + GType *transformed_filter_; vector strides_; vector paddings_; vector dilations_; @@ -728,11 +728,11 @@ class LrnParam : public OpParam { data_format_ = GetStringAttr("data_format", attrs); } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } - RType *MidOut() const { return mid_out_; } + GType *MidOut() const { return mid_out_; } const int &N() const { return n_; } @@ -745,9 +745,9 @@ class LrnParam : public OpParam { const string &DataFormat() const { return data_format_; } private: - RType *input_x_; - RType *out_; - RType *mid_out_; + GType *input_x_; + GType *out_; + GType *mid_out_; int n_; float alpha_; float beta_; @@ -772,20 +772,20 @@ class NormParam : OpParam { axis_ = GetAttr("axis", attrs); } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } - RType *OutputNorm() const { return output_norm_; } + GType *OutputNorm() const { return output_norm_; } const float &Epsilon() const { return epsilon_; } const int &Axis() const { return axis_; } 
private: - RType *input_x_; - RType *out_; - RType *output_norm_; + GType *input_x_; + GType *out_; + GType *output_norm_; float epsilon_; int axis_; }; @@ -811,17 +811,17 @@ class BatchNormParam : OpParam { // is_test_ = GetAttr("is_test", attrs); } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - RType *OutputY() const { return output_y_; } + GType *OutputY() const { return output_y_; } - const RType *InputBias() const { return input_bias_; } + const GType *InputBias() const { return input_bias_; } - const RType *InputMean() const { return input_mean_; } + const GType *InputMean() const { return input_mean_; } - const RType *InputScale() const { return input_scale_; } + const GType *InputScale() const { return input_scale_; } - const RType *InputVariance() const { return input_variance_; } + const GType *InputVariance() const { return input_variance_; } const float &Epsilon() const { return epsilon_; } @@ -831,27 +831,27 @@ class BatchNormParam : OpParam { const string &DataFormat() const { return data_format_; } - void SetNewScale(RType *new_scale) { new_scale_ = new_scale; } + void SetNewScale(GType *new_scale) { new_scale_ = new_scale; } - void SetNewBias(RType *new_bias) { new_bias_ = new_bias; } + void SetNewBias(GType *new_bias) { new_bias_ = new_bias; } - const RType *NewScale() const { return new_scale_; } + const GType *NewScale() const { return new_scale_; } - const RType *NewBias() const { return new_bias_; } + const GType *NewBias() const { return new_bias_; } private: - RType *input_x_; - RType *output_y_; - RType *input_bias_; - RType *input_mean_; - RType *input_scale_; - RType *input_variance_; + GType *input_x_; + GType *output_y_; + GType *input_bias_; + GType *input_mean_; + GType *input_scale_; + GType *input_variance_; float epsilon_; float momentum_; bool is_test_; string data_format_; - RType *new_bias_; - RType *new_scale_; + GType *new_bias_; + GType *new_scale_; }; #endif @@ -875,9 +875,9 @@ class PoolParam : public OpParam { global_pooling_ = GetAttr("global_pooling", attrs); } - const RType *Input() const { return input_; } + const GType *Input() const { return input_; } - RType *Output() const { return output_; } + GType *Output() const { return output_; } const string &PoolingType() const { return pooling_type_; } @@ -892,8 +892,8 @@ class PoolParam : public OpParam { bool isGlobalPooling() const { return global_pooling_; } private: - RType *input_; - RType *output_; + GType *input_; + GType *output_; string pooling_type_; vector ksize_; vector strides_; @@ -942,13 +942,13 @@ class PriorBoxParam : public OpParam { step_h_ = GetAttr("step_h", attrs); offset_ = GetAttr("offset", attrs); } - const RType *Input() const { return input_; } + const GType *Input() const { return input_; } - const RType *InputImage() const { return input_image_; } + const GType *InputImage() const { return input_image_; } - RType *OutputBoxes() const { return output_boxes_; } + GType *OutputBoxes() const { return output_boxes_; } - RType *OutputVariances() const { return output_variances_; } + GType *OutputVariances() const { return output_variances_; } const vector &MinSizes() const { return min_sizes_; } @@ -973,10 +973,10 @@ class PriorBoxParam : public OpParam { } private: - RType *input_; - RType *input_image_; - RType *output_boxes_; - RType *output_variances_; + GType *input_; + GType *input_image_; + GType *output_boxes_; + GType *output_variances_; vector min_sizes_; vector max_sizes_; vector aspect_ratios_; @@ -1005,21 
+1005,21 @@ class BoxCoderParam : public OpParam { output_box_ = OutputBoxFrom(outputs, scope); code_type_ = GetStringAttr("code_type", attrs); } - const RType *InputPriorBox() const { return input_priorbox_; } + const GType *InputPriorBox() const { return input_priorbox_; } - const RType *InputPriorBoxVar() const { return input_priorboxvar_; } + const GType *InputPriorBoxVar() const { return input_priorboxvar_; } - const RType *InputTargetBox() const { return input_targetbox_; } + const GType *InputTargetBox() const { return input_targetbox_; } - RType *OutputBox() const { return output_box_; } + GType *OutputBox() const { return output_box_; } const std::string &CodeType() const { return code_type_; } private: - RType *input_priorbox_; - RType *input_priorboxvar_; - RType *input_targetbox_; - RType *output_box_; + GType *input_priorbox_; + GType *input_priorboxvar_; + GType *input_targetbox_; + GType *output_box_; std::string code_type_; }; #endif @@ -1046,11 +1046,11 @@ class SoftmaxParam : public OpParam { #ifdef PADDLE_MOBILE_FPGA private: - std::shared_ptr float_input_x_; + std::shared_ptr float_input_x_; fpga::BypassArgs fpga_bypass_args; public: - RType *FloatInput() const { + GType *FloatInput() const { return float_input_x_ == nullptr ? input_x_ : float_input_x_.get(); } void SetFloatInput(Tensor *input) { float_input_x_.reset(input); } @@ -1072,12 +1072,12 @@ class SigmoidParam : public OpParam { input_x_ = InputXFrom(inputs, scope); out_ = OutFrom(outputs, scope); } - const RType *InputX() const { return input_x_; } - RType *Out() const { return out_; } + const GType *InputX() const { return input_x_; } + GType *Out() const { return out_; } private: - RType *input_x_; - RType *out_; + GType *input_x_; + GType *out_; #ifdef PADDLE_MOBILE_FPGA private: @@ -1111,11 +1111,11 @@ class MultiClassNMSParam : public OpParam { score_threshold_ = GetAttr("score_threshold", attrs); } - RType *InputBBoxes() const { return input_bboxes_; } + GType *InputBBoxes() const { return input_bboxes_; } - RType *InputScores() const { return input_scores_; } + GType *InputScores() const { return input_scores_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } const int &BackGroundLabel() const { return background_label_; } @@ -1130,9 +1130,9 @@ class MultiClassNMSParam : public OpParam { const float &ScoreThreshold() const { return score_threshold_; } private: - RType *input_bboxes_; - RType *input_scores_; - RType *out_; + GType *input_bboxes_; + GType *input_scores_; + GType *out_; int background_label_; int nms_top_k_; int keep_top_k_; @@ -1155,12 +1155,12 @@ class PolygonBoxTransformParam : public OpParam { input_ = InputFrom(inputs, scope); output_ = OutputFrom(outputs, scope); } - const RType *Input() const { return input_; } - RType *Output() const { return output_; } + const GType *Input() const { return input_; } + GType *Output() const { return output_; } private: - RType *input_; - RType *output_; + GType *input_; + GType *output_; }; #endif @@ -1214,11 +1214,11 @@ class FetchParam : public OpParam { #ifdef PADDLE_MOBILE_FPGA private: - std::shared_ptr float_input_x_; + std::shared_ptr float_input_x_; fpga::BypassArgs fpga_bypass_args; public: - RType *FloatInput() const { + GType *FloatInput() const { return float_input_x_ == nullptr ? 
input_x_ : float_input_x_.get(); } void SetFloatInput(Tensor *input) { float_input_x_.reset(input); } @@ -1246,7 +1246,7 @@ class FillConstantParam : public OpParam { Variable *OutVar() const { return out_var_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } const int &DataDtype() const { return dtype_; } @@ -1256,7 +1256,7 @@ class FillConstantParam : public OpParam { private: Variable *out_var_; - RType *out_; + GType *out_; int dtype_; vector shape_; float value_; @@ -1277,15 +1277,15 @@ class TransposeParam : public OpParam { axis_ = GetAttr>("axis", attrs); } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } const vector &Axis() const { return axis_; } private: - RType *input_x_; - RType *out_; + GType *input_x_; + GType *out_; vector axis_; }; #endif @@ -1305,18 +1305,18 @@ class Transpose2Param : public OpParam { axis_ = GetAttr>("axis", attrs); } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } - RType *OutputXShape() const { return output_xshape_; } + GType *OutputXShape() const { return output_xshape_; } const vector &Axis() const { return axis_; } private: - RType *input_x_; - RType *out_; - RType *output_xshape_; + GType *input_x_; + GType *out_; + GType *output_xshape_; vector axis_; }; #endif @@ -1371,8 +1371,8 @@ class CrfParam : public OpParam { const GType *InputTransition() const { return input_transition_; } const GType *InputLabel() const { return input_label_; } GType *outputVBP() const { return output_viterbipath_; } - // const RType *InputIds() const { return input_ids_; } - // RType *Out() const { return out_; } + // const GType *InputIds() const { return input_ids_; } + // GType *Out() const { return out_; } // int64_t PaddingIdx() const { return padding_idx_; } private: @@ -1381,8 +1381,8 @@ class CrfParam : public OpParam { GType *input_label_; GType *output_viterbipath_; - // RType *input_ids_; - // RType *out_; + // GType *input_ids_; + // GType *out_; // int64_t padding_idx_; }; #endif @@ -1409,20 +1409,20 @@ class ReshapeParam : public OpParam { } } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - const RType *InputShape() const { return input_shape_; } + const GType *InputShape() const { return input_shape_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } const vector &Shape() const { return shape_; } const bool &Inplace() const { return inplace_; } private: - RType *input_x_; - RType *input_shape_; - RType *out_; + GType *input_x_; + GType *input_shape_; + GType *out_; vector shape_; bool inplace_; }; @@ -1489,11 +1489,11 @@ class ScaleParam : public OpParam { biases_ = GetAttr>("biases", attrs); } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - const RType *InputBias() const { return input_bias_; } + const GType *InputBias() const { return input_bias_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } const bool &Inplace() const { return inplace_; } @@ -1504,9 +1504,9 @@ class ScaleParam : public OpParam { const vector &Biases() const { return biases_; } private: - RType *input_x_; - RType *input_bias_; - RType *out_; + GType *input_x_; + GType *input_bias_; + GType *out_; bool inplace_; bool has_bias_; 
vector scales_; @@ -1559,11 +1559,11 @@ class ResizeParam : public OpParam { out_width_scale_ = GetAttr("out_width_scale", attrs); } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - const RType *InputShape() const { return input_shape_; } + const GType *InputShape() const { return input_shape_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } const bool &IsPyramidTest() const { return is_pyramid_test_; } @@ -1576,9 +1576,9 @@ class ResizeParam : public OpParam { const float &OutWidthScale() const { return out_width_scale_; } private: - RType *input_x_; - RType *input_shape_; - RType *out_; + GType *input_x_; + GType *input_shape_; + GType *out_; bool is_pyramid_test_; int height_; int width_; @@ -1603,13 +1603,13 @@ class ReluParamBase : public OpParam { out_ = OutFrom(outputs, scope); } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } private: - RType *input_x_; - RType *out_; + GType *input_x_; + GType *out_; }; template @@ -1644,20 +1644,20 @@ class TanhParam : public OpParam { input_x_ = InputXFrom(inputs, scope); out_ = OutFrom(outputs, scope); } - const RType *InputX() const { return input_x_; } - RType *Out() const { return out_; } + const GType *InputX() const { return input_x_; } + GType *Out() const { return out_; } private: - RType *input_x_; - RType *out_; + GType *input_x_; + GType *out_; #ifdef PADDLE_MOBILE_FPGA private: - std::shared_ptr float_input_x_; + std::shared_ptr float_input_x_; fpga::BypassArgs fpga_bypass_args; public: - RType *FloatInput() const { + GType *FloatInput() const { return float_input_x_ == nullptr ? input_x_ : float_input_x_.get(); } void SetFloatInput(Tensor *input) { float_input_x_.reset(input); } @@ -1684,15 +1684,15 @@ class PReluParam : public OpParam { mode_ = GetStringAttr("mode", attrs); DLOG << "PReluParam mode after" << mode_; } - const RType *InputX() const { return input_x_; } - const RType *InputAlpha() const { return alpha_; } - RType *Out() const { return out_; } + const GType *InputX() const { return input_x_; } + const GType *InputAlpha() const { return alpha_; } + GType *Out() const { return out_; } const std::string &Mode() const { return mode_; } private: - RType *input_x_; - RType *out_; - RType *alpha_; + GType *input_x_; + GType *out_; + GType *alpha_; std::string mode_; }; #endif @@ -1715,9 +1715,9 @@ class FusionFcParam : public OpParam { } GType *InputX() const { return input_x_; } - RType *InputY() const { return input_y_; } + GType *InputY() const { return input_y_; } - RType *InputZ() const { return input_z_; } + GType *InputZ() const { return input_z_; } GType *Out() const { return out_; } @@ -1729,8 +1729,8 @@ class FusionFcParam : public OpParam { private: GType *input_x_; - RType *input_y_; - RType *input_z_; + GType *input_y_; + GType *input_z_; GType *out_; int x_num_col_dims_; int y_num_col_dims_; @@ -1765,16 +1765,16 @@ class FusionConvAddParam : public ConvParam { axis_ = OpParam::GetAttr("axis", attrs); output_ = OpParam::OutFrom(outputs, scope); } - RType *Bias() const { return bias_; } + GType *Bias() const { return bias_; } const int &Axis() const { return axis_; } - RType *Output() const { return output_; } + GType *Output() const { return output_; } protected: - RType *bias_; + GType *bias_; int axis_; - RType *output_; + GType *output_; }; template @@ -1809,17 +1809,17 @@ class 
FusionConvAddPReluParam : public ConvParam { axis_ = OpParam::GetAttr("axis", attrs); output_ = OpParam::OutFrom(outputs, scope); } - const RType *InputAlpha() const { return alpha_; } + const GType *InputAlpha() const { return alpha_; } const std::string &Mode() const { return mode_; } - RType *Bias() const { return bias_; } + GType *Bias() const { return bias_; } const int &Axis() const { return axis_; } - RType *Output() const { return output_; } + GType *Output() const { return output_; } protected: - RType *bias_; + GType *bias_; int axis_; - RType *output_; - RType *alpha_; + GType *output_; + GType *alpha_; std::string mode_; }; #endif @@ -1851,22 +1851,22 @@ class FusionConvAddAddPReluParam : public ConvParam { bias1_ = OpParam::InputXFrom1(inputs, scope); } } - const RType *InputAlpha() const { return alpha_; } + const GType *InputAlpha() const { return alpha_; } const std::string &Mode() const { return mode_; } - const RType *Bias1() const { return bias1_; } + const GType *Bias1() const { return bias1_; } - RType *Bias() const { return bias_; } + GType *Bias() const { return bias_; } const int &Axis() const { return axis_; } - RType *Output() const { return output_; } + GType *Output() const { return output_; } protected: - RType *bias_; + GType *bias_; int axis_; - RType *output_; - RType *alpha_; + GType *output_; + GType *alpha_; std::string mode_; - RType *bias1_; + GType *bias1_; std::string keyOutput_; std::string keyX1_; std::string keyY1_; @@ -1895,19 +1895,19 @@ class FusionConvAddBNReluParam : public ConvParam { momentum_ = OpParam::GetAttr("momentum", attrs); // is_test_ = OpParam::GetAttr("is_test", attrs); } - RType *Bias() const { return bias_; } + GType *Bias() const { return bias_; } const int &Axis() const { return axis_; } - RType *Output() const { return output_; } + GType *Output() const { return output_; } - const RType *InputBias() const { return input_bias_; } + const GType *InputBias() const { return input_bias_; } - const RType *InputMean() const { return input_mean_; } + const GType *InputMean() const { return input_mean_; } - const RType *InputScale() const { return input_scale_; } + const GType *InputScale() const { return input_scale_; } - const RType *InputVariance() const { return input_variance_; } + const GType *InputVariance() const { return input_variance_; } const float &Epsilon() const { return epsilon_; } @@ -1915,27 +1915,27 @@ class FusionConvAddBNReluParam : public ConvParam { const bool &IsTest() const { return is_test_; } - void SetNewScale(RType *new_scale) { new_scale_ = new_scale; } + void SetNewScale(GType *new_scale) { new_scale_ = new_scale; } - void SetNewBias(RType *new_bias) { new_bias_ = new_bias; } + void SetNewBias(GType *new_bias) { new_bias_ = new_bias; } - const RType *NewScale() const { return new_scale_; } + const GType *NewScale() const { return new_scale_; } - const RType *NewBias() const { return new_bias_; } + const GType *NewBias() const { return new_bias_; } protected: - RType *bias_; + GType *bias_; int axis_; - RType *output_; - RType *input_bias_; - RType *input_mean_; - RType *input_scale_; - RType *input_variance_; + GType *output_; + GType *input_bias_; + GType *input_mean_; + GType *input_scale_; + GType *input_variance_; float epsilon_; float momentum_; bool is_test_; - RType *new_bias_; - RType *new_scale_; + GType *new_bias_; + GType *new_scale_; }; #endif @@ -1969,19 +1969,19 @@ class FusionConvBNAddReluParam : public ConvParam { } // is_test_ = OpParam::GetAttr("is_test", attrs); } - RType *Bias() const 
{ return bias_; } + GType *Bias() const { return bias_; } const int &Axis() const { return axis_; } - RType *Output() const { return output_; } + GType *Output() const { return output_; } - const RType *InputBias() const { return input_bias_; } + const GType *InputBias() const { return input_bias_; } - const RType *InputMean() const { return input_mean_; } + const GType *InputMean() const { return input_mean_; } - const RType *InputScale() const { return input_scale_; } + const GType *InputScale() const { return input_scale_; } - const RType *InputVariance() const { return input_variance_; } + const GType *InputVariance() const { return input_variance_; } const float &Epsilon() const { return epsilon_; } @@ -1989,27 +1989,27 @@ class FusionConvBNAddReluParam : public ConvParam { const bool &IsTest() const { return is_test_; } - void SetNewScale(RType *new_scale) { new_scale_ = new_scale; } + void SetNewScale(GType *new_scale) { new_scale_ = new_scale; } - void SetNewBias(RType *new_bias) { new_bias_ = new_bias; } + void SetNewBias(GType *new_bias) { new_bias_ = new_bias; } - const RType *NewScale() const { return new_scale_; } + const GType *NewScale() const { return new_scale_; } - const RType *NewBias() const { return new_bias_; } + const GType *NewBias() const { return new_bias_; } protected: - RType *bias_; + GType *bias_; int axis_; - RType *output_; - RType *input_bias_; - RType *input_mean_; - RType *input_scale_; - RType *input_variance_; + GType *output_; + GType *input_bias_; + GType *input_mean_; + GType *input_scale_; + GType *input_variance_; float epsilon_; float momentum_; bool is_test_; - RType *new_bias_; - RType *new_scale_; + GType *new_bias_; + GType *new_scale_; std::string keyBNY_; std::string keyX_; std::string keyY_; @@ -2036,15 +2036,15 @@ class FusionConvBNParam : public ConvParam { momentum_ = OpParam::GetAttr("momentum", attrs); // is_test_ = OpParam::GetAttr("is_test", attrs); } - RType *Output() const { return output_y_; } + GType *Output() const { return output_y_; } - const RType *InputBias() const { return input_bias_; } + const GType *InputBias() const { return input_bias_; } - const RType *InputMean() const { return input_mean_; } + const GType *InputMean() const { return input_mean_; } - const RType *InputScale() const { return input_scale_; } + const GType *InputScale() const { return input_scale_; } - const RType *InputVariance() const { return input_variance_; } + const GType *InputVariance() const { return input_variance_; } const float &Epsilon() const { return epsilon_; } @@ -2052,25 +2052,25 @@ class FusionConvBNParam : public ConvParam { const bool &IsTest() const { return is_test_; } - void SetNewScale(RType *new_scale) { new_scale_ = new_scale; } + void SetNewScale(GType *new_scale) { new_scale_ = new_scale; } - void SetNewBias(RType *new_bias) { new_bias_ = new_bias; } + void SetNewBias(GType *new_bias) { new_bias_ = new_bias; } - const RType *NewScale() const { return new_scale_; } + const GType *NewScale() const { return new_scale_; } - const RType *NewBias() const { return new_bias_; } + const GType *NewBias() const { return new_bias_; } protected: - RType *output_y_; - RType *input_bias_; - RType *input_mean_; - RType *input_scale_; - RType *input_variance_; + GType *output_y_; + GType *input_bias_; + GType *input_mean_; + GType *input_scale_; + GType *input_variance_; float epsilon_; float momentum_; bool is_test_; - RType *new_bias_; - RType *new_scale_; + GType *new_bias_; + GType *new_scale_; }; #endif @@ -2096,19 +2096,19 @@ class 
FusionConvAddBNParam : public ConvParam { momentum_ = OpParam::GetAttr("momentum", attrs); // is_test_ = OpParam::GetAttr("is_test", attrs); } - RType *Bias() const { return bias_; } + GType *Bias() const { return bias_; } const int &Axis() const { return axis_; } - RType *Output() const { return output_y_; } + GType *Output() const { return output_y_; } - const RType *InputBias() const { return input_bias_; } + const GType *InputBias() const { return input_bias_; } - const RType *InputMean() const { return input_mean_; } + const GType *InputMean() const { return input_mean_; } - const RType *InputScale() const { return input_scale_; } + const GType *InputScale() const { return input_scale_; } - const RType *InputVariance() const { return input_variance_; } + const GType *InputVariance() const { return input_variance_; } const float &Epsilon() const { return epsilon_; } @@ -2116,27 +2116,27 @@ class FusionConvAddBNParam : public ConvParam { const bool &IsTest() const { return is_test_; } - void SetNewScale(RType *new_scale) { new_scale_ = new_scale; } + void SetNewScale(GType *new_scale) { new_scale_ = new_scale; } - void SetNewBias(RType *new_bias) { new_bias_ = new_bias; } + void SetNewBias(GType *new_bias) { new_bias_ = new_bias; } - const RType *NewScale() const { return new_scale_; } + const GType *NewScale() const { return new_scale_; } - const RType *NewBias() const { return new_bias_; } + const GType *NewBias() const { return new_bias_; } protected: - RType *bias_; + GType *bias_; int axis_; - RType *output_y_; - RType *input_bias_; - RType *input_mean_; - RType *input_scale_; - RType *input_variance_; + GType *output_y_; + GType *input_bias_; + GType *input_mean_; + GType *input_scale_; + GType *input_variance_; float epsilon_; float momentum_; bool is_test_; - RType *new_bias_; - RType *new_scale_; + GType *new_bias_; + GType *new_scale_; }; #endif @@ -2160,15 +2160,15 @@ class FusionDWConvBNReluParam : public ConvParam { momentum_ = OpParam::GetAttr("momentum", attrs); // is_test_ = OpParam::GetAttr("is_test", attrs); } - RType *Output() const { return output_; } + GType *Output() const { return output_; } - const RType *InputBias() const { return input_bias_; } + const GType *InputBias() const { return input_bias_; } - const RType *InputMean() const { return input_mean_; } + const GType *InputMean() const { return input_mean_; } - const RType *InputScale() const { return input_scale_; } + const GType *InputScale() const { return input_scale_; } - const RType *InputVariance() const { return input_variance_; } + const GType *InputVariance() const { return input_variance_; } const float &Epsilon() const { return epsilon_; } @@ -2176,25 +2176,25 @@ class FusionDWConvBNReluParam : public ConvParam { const bool &IsTest() const { return is_test_; } - void SetNewScale(RType *new_scale) { new_scale_ = new_scale; } + void SetNewScale(GType *new_scale) { new_scale_ = new_scale; } - void SetNewBias(RType *new_bias) { new_bias_ = new_bias; } + void SetNewBias(GType *new_bias) { new_bias_ = new_bias; } - const RType *NewScale() const { return new_scale_; } + const GType *NewScale() const { return new_scale_; } - const RType *NewBias() const { return new_bias_; } + const GType *NewBias() const { return new_bias_; } protected: - RType *output_; - RType *input_bias_; - RType *input_mean_; - RType *input_scale_; - RType *input_variance_; + GType *output_; + GType *input_bias_; + GType *input_mean_; + GType *input_scale_; + GType *input_variance_; float epsilon_; float momentum_; bool is_test_; - 
RType *new_bias_; - RType *new_scale_; + GType *new_bias_; + GType *new_scale_; }; #endif @@ -2219,15 +2219,15 @@ class FusionConvBNReluParam : public ConvParam { momentum_ = OpParam::GetAttr("momentum", attrs); // is_test_ = OpParam::GetAttr("is_test", attrs); } - RType *Output() const { return output_; } + GType *Output() const { return output_; } - const RType *InputBias() const { return input_bias_; } + const GType *InputBias() const { return input_bias_; } - const RType *InputMean() const { return input_mean_; } + const GType *InputMean() const { return input_mean_; } - const RType *InputScale() const { return input_scale_; } + const GType *InputScale() const { return input_scale_; } - const RType *InputVariance() const { return input_variance_; } + const GType *InputVariance() const { return input_variance_; } const float &Epsilon() const { return epsilon_; } @@ -2235,25 +2235,25 @@ class FusionConvBNReluParam : public ConvParam { const bool &IsTest() const { return is_test_; } - void SetNewScale(RType *new_scale) { new_scale_ = new_scale; } + void SetNewScale(GType *new_scale) { new_scale_ = new_scale; } - void SetNewBias(RType *new_bias) { new_bias_ = new_bias; } + void SetNewBias(GType *new_bias) { new_bias_ = new_bias; } - const RType *NewScale() const { return new_scale_; } + const GType *NewScale() const { return new_scale_; } - const RType *NewBias() const { return new_bias_; } + const GType *NewBias() const { return new_bias_; } protected: - RType *output_; - RType *input_bias_; - RType *input_mean_; - RType *input_scale_; - RType *input_variance_; + GType *output_; + GType *input_bias_; + GType *input_mean_; + GType *input_scale_; + GType *input_variance_; float epsilon_; float momentum_; bool is_test_; - RType *new_bias_; - RType *new_scale_; + GType *new_bias_; + GType *new_scale_; }; #endif @@ -2308,15 +2308,15 @@ class DropoutParam : public OpParam { dropout_prob_ = GetAttr("dropout_prob", attrs); } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } - RType *Out() const { return out_; } + GType *Out() const { return out_; } float DropoutProb() const { return dropout_prob_; } private: - RType *input_x_; - RType *out_; + GType *input_x_; + GType *out_; float dropout_prob_; }; #endif @@ -2342,11 +2342,11 @@ class ConvTransposeParam : public OpParam { groups = GetAttr("groups", attrs); } - const RType *Input() const { return input_; } + const GType *Input() const { return input_; } - const RType *Filter() const { return filter_; } + const GType *Filter() const { return filter_; } - RType *Output() const { return output_; } + GType *Output() const { return output_; } const vector &Strides() const { return strides_; } @@ -2357,9 +2357,9 @@ class ConvTransposeParam : public OpParam { const int &Groups() const { return groups; } private: - RType *input_; - RType *output_; - RType *filter_; + GType *input_; + GType *output_; + GType *filter_; vector strides_; vector paddings_; vector dilations_; @@ -2398,16 +2398,16 @@ class FusionDeconvAddParam : public ConvTransposeParam { axis_ = OpParam::GetAttr("axis", attrs); output_ = OpParam::OutFrom(outputs, scope); } - RType *Bias() const { return bias_; } + GType *Bias() const { return bias_; } const int &Axis() const { return axis_; } - RType *Output() const { return output_; } + GType *Output() const { return output_; } protected: - RType *bias_; + GType *bias_; int axis_; - RType *output_; + GType *output_; }; #endif @@ -2539,13 +2539,13 @@ class FlattenParam : public OpParam { out_ 
= OutFrom(outputs, scope); axis = GetAttr("axis", attrs); } - const RType *InputX() const { return input_x_; } - RType *Out() const { return out_; } + const GType *InputX() const { return input_x_; } + GType *Out() const { return out_; } const int &Axis() const { return axis; } private: - RType *input_x_; - RType *out_; + GType *input_x_; + GType *out_; int axis; }; #endif @@ -2569,7 +2569,7 @@ class SplitParam : public OpParam { // out_ts_.push_back(*scope.FindVar(outs_[i])->GetMutable()); // } } - const RType *InputX() const { return input_x_; } + const GType *InputX() const { return input_x_; } std::vector Outs() const { return outs_; } int Axis() const { return axis; } int Num() const { return num; } @@ -2577,7 +2577,7 @@ class SplitParam : public OpParam { // std::vector OutTs() const { return out_ts_; } private: - RType *input_x_; + GType *input_x_; std::vector outs_; int axis; int num; @@ -2611,16 +2611,16 @@ class BilinearInterpParam : public OpParam { out_h_ = GetAttr("out_h", attrs); out_w_ = GetAttr("out_w", attrs); } - const RType *InputX() const { return input_x_; } - const RType *InputOutPutSize() const { return input_outsize_; } - RType *Out() const { return out_; } + const GType *InputX() const { return input_x_; } + const GType *InputOutPutSize() const { return input_outsize_; } + GType *Out() const { return out_; } int OutH() const { return out_h_; } int OutW() const { return out_w_; } private: - RType *input_x_; - RType *input_outsize_; - RType *out_; + GType *input_x_; + GType *input_outsize_; + GType *out_; int out_h_; int out_w_; }; @@ -2638,12 +2638,12 @@ class ShapeParam : public OpParam { input_ = InputFrom(inputs, scope); out_ = OutFrom(outputs, scope); } - const RType *Input() const { return input_; } - RType *Out() const { return out_; } + const GType *Input() const { return input_; } + GType *Out() const { return out_; } private: - RType *input_; - RType *out_; + GType *input_; + GType *out_; }; #endif @@ -2686,8 +2686,8 @@ class CastParam : public OpParam { } public: - RType *input_; - RType *output_; + GType *input_; + GType *output_; int input_type_; int output_type_; }; @@ -2723,9 +2723,9 @@ class QuantizeParam : public OpParam { GType *input_; // op output GType *output_; - RType *online_scale_; + GType *online_scale_; // quantize offline scale - RType *offline_scale_; + GType *offline_scale_; // if offine scale or not bool offline_ = false; // round method type @@ -2759,7 +2759,7 @@ class DequantizeParam : public OpParam { GType *input_; // op output GType *output_; - RType *activation_scale_; + GType *activation_scale_; float weight_scale_; }; #endif @@ -2789,10 +2789,10 @@ class FusionDequantBNParam : public DequantizeParam { public: // batch norm - RType *bn_mean_; - RType *bn_variance_; - RType *bn_scale_; - RType *bn_bias_; + GType *bn_mean_; + GType *bn_variance_; + GType *bn_scale_; + GType *bn_bias_; float epsilon_; }; #endif @@ -2819,7 +2819,7 @@ class FusionDequantAddBNParam : public FusionDequantBNParam { public: // elementwise add int axis_; - RType *bias_; + GType *bias_; }; #endif @@ -2848,9 +2848,9 @@ class FusionDequantAddBNQuantParam : public FusionDequantAddBNParam { } public: - RType *online_scale_; + GType *online_scale_; // quantize offline scale - RType *offline_scale_; + GType *offline_scale_; // if offine scale or not bool offline_ = false; // round method type diff --git a/src/operators/softmax_op.cpp b/src/operators/softmax_op.cpp index e605864706..e4e6a8cf30 100644 --- a/src/operators/softmax_op.cpp +++ 
b/src/operators/softmax_op.cpp
@@ -21,6 +21,7 @@ namespace operators {
 template <typename DeviceType, typename T>
 void SoftmaxOp<DeviceType, T>::InferShape() const {
   this->param_.Out()->Resize(this->param_.InputX()->dims());
+  this->param_.Out()->set_lod(this->param_.InputX()->lod());
 }
 
 }  // namespace operators
diff --git a/src/operators/top_k_op.cpp b/src/operators/top_k_op.cpp
index 09c61f51d6..d5cf6a37e9 100644
--- a/src/operators/top_k_op.cpp
+++ b/src/operators/top_k_op.cpp
@@ -26,11 +26,9 @@ void TopKOp<DeviceType, T>::InferShape() const {
   // should check k <= dims[-1] && k >= 1
   dims[dims.size() - 1] = k;
   this->param_.output_->Resize(dims);
-  // this->param_.output_->set_lod(this->param_.input_->lod());
-  this->param_.output_->set_lod({{0, 1}});
   this->param_.indices_->Resize(dims);
-  // this->param_.indices_->set_lod(this->param_.input_->lod());
-  this->param_.indices_->set_lod({{0, 1}});
+  this->param_.output_->set_lod(this->param_.input_->lod());
+  this->param_.indices_->set_lod(this->param_.input_->lod());
 }
 
 }  // namespace operators
--
GitLab
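
The common thread of this patch is that each InferShape now forwards the input's LoD (sequence offsets) to the output instead of dropping it or hard-coding a dummy LoD. A minimal standalone sketch of that pattern is below; SimpleTensor and InferShapeLike are simplified stand-ins, not the framework's actual LoDTensor API.

  // Sketch: output shape follows the input, and the input LoD is propagated.
  #include <cstddef>
  #include <iostream>
  #include <vector>

  using LoD = std::vector<std::vector<size_t>>;

  struct SimpleTensor {
    std::vector<int> dims;
    LoD lod;
  };

  // Element-wise style InferShape: resize to the input dims and copy the LoD
  // so downstream sequence ops still see the original sequence boundaries.
  void InferShapeLike(const SimpleTensor& x, SimpleTensor* out) {
    out->dims = x.dims;  // Resize(x.dims)
    out->lod = x.lod;    // set_lod(x.lod()) -- the behaviour added here
  }

  int main() {
    SimpleTensor x{{6, 4}, {{0, 2, 6}}};  // two sequences, lengths 2 and 4
    SimpleTensor y;
    InferShapeLike(x, &y);
    std::cout << "sequences: " << y.lod[0].size() - 1 << "\n";  // prints 2
    return 0;
  }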