diff --git a/mindspore/lite/schema/model.fbs b/mindspore/lite/schema/model.fbs
index e78fe29d1d94d920bb55ee8f6438bacee289dbea..4031af06f541f7fdbf46353b28430b898272f2dc 100644
--- a/mindspore/lite/schema/model.fbs
+++ b/mindspore/lite/schema/model.fbs
@@ -80,7 +80,7 @@ union PrimitiveType {
     Pad,
     Maximum,
     Minimum,
-    CaffePReLU,
+    PReLU,
     LeakyReLU,
     ArgMax,
     ArgMin,
@@ -126,7 +126,6 @@ union PrimitiveType {
     Broadcast,
     BroadcastTo,
     Lrn,
-    Prelu,
    ZerosLike,
     TopK,
     SpaceToDepth,
diff --git a/mindspore/lite/schema/ops.fbs b/mindspore/lite/schema/ops.fbs
index ebb82b7f2e7e7671730d09c138a29c5b150ebd04..592b23f964d45a1e2d2cb07bbd47b7c33bb20df8 100644
--- a/mindspore/lite/schema/ops.fbs
+++ b/mindspore/lite/schema/ops.fbs
@@ -540,7 +540,7 @@ table MatMul {
     transposeB : bool = false;
 }
-table CaffePReLU {
+table PReLU {
     channelShared : bool = false;
     slope: [float];
 }
@@ -650,10 +650,6 @@ table Reduce {
     mode: ReduceMode;
 }
-table Prelu {
-    slope: [float];
-}
-
 table Transpose {
     perm: [int];
     conjugate: bool = false;
diff --git a/mindspore/lite/src/ops/caffe_p_relu.cc b/mindspore/lite/src/ops/p_relu.cc
similarity index 66%
rename from mindspore/lite/src/ops/caffe_p_relu.cc
rename to mindspore/lite/src/ops/p_relu.cc
index b858a9623b28a20e2050045561829dd4c1161b6f..b2f1bdadf2793588771cbd4ab324193514a0d92f 100644
--- a/mindspore/lite/src/ops/caffe_p_relu.cc
+++ b/mindspore/lite/src/ops/p_relu.cc
@@ -14,20 +14,20 @@
  * limitations under the License.
  */
-#include "src/ops/caffe_p_relu.h"
+#include "src/ops/p_relu.h"
 namespace mindspore {
 namespace lite {
 #ifdef PRIMITIVE_WRITEABLE
-bool CaffePReLU::GetChannelShared() const { return this->primitive_->value.AsCaffePReLU()->channelShared; }
+bool PReLU::GetChannelShared() const { return this->primitive_->value.AsPReLU()->channelShared; }
-void CaffePReLU::SetChannelShared(bool channel_shared) {
-  this->primitive_->value.AsCaffePReLU()->channelShared = channel_shared;
+void PReLU::SetChannelShared(bool channel_shared) {
+  this->primitive_->value.AsPReLU()->channelShared = channel_shared;
 }
 #else
-bool CaffePReLU::GetChannelShared() const { return this->primitive_->value_as_CaffePReLU()->channelShared(); }
+bool PReLU::GetChannelShared() const { return this->primitive_->value_as_PReLU()->channelShared(); }
 #endif
 }  // namespace lite
diff --git a/mindspore/lite/src/ops/caffe_p_relu.h b/mindspore/lite/src/ops/p_relu.h
similarity index 73%
rename from mindspore/lite/src/ops/caffe_p_relu.h
rename to mindspore/lite/src/ops/p_relu.h
index fa7ba23ddc78f5cb1c9d8a16dea83c5294c4aa24..1f2c04a41c0f0bcf86d27ad5f0aaae98da5a3bc9 100644
--- a/mindspore/lite/src/ops/caffe_p_relu.h
+++ b/mindspore/lite/src/ops/p_relu.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
-#ifndef LITE_MINDSPORE_LITE_C_OPS_CAFFE_P_RE_L_U_H_
-#define LITE_MINDSPORE_LITE_C_OPS_CAFFE_P_RE_L_U_H_
+#ifndef LITE_MINDSPORE_LITE_C_OPS_P_RELU_H_
+#define LITE_MINDSPORE_LITE_C_OPS_P_RELU_H_
 #include
 #include
@@ -26,21 +26,21 @@
 namespace mindspore {
 namespace lite {
-class CaffePReLU : public Activation {
+class PReLU : public Activation {
  public:
 #ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(CaffePReLU, Activation);
-  CaffePReLU() = default;
-  explicit CaffePReLU(schema::PrimitiveT *primitive) : Activation(primitive) {}
+  MS_DECLARE_PARENT(PReLU, Activation);
+  PReLU() = default;
+  explicit PReLU(schema::PrimitiveT *primitive) : Activation(primitive) {}
   void SetChannelShared(bool channel_shared);
 #else
-  explicit CaffePReLU(schema::Primitive *primitive) : Activation(primitive) {}
+  explicit PReLU(schema::Primitive *primitive) : Activation(primitive) {}
   schema::Primitive *Init(schema::Primitive *primitive) {
     flatbuffers::FlatBufferBuilder fbb(1024);
-    auto attr = primitive->value_as_CaffePReLU();
+    auto attr = primitive->value_as_PReLU();
     MS_ASSERT(attr != nullptr);
     auto slope = std::make_unique<std::vector<float>>();
@@ -48,8 +48,8 @@ class CaffePReLU : public Activation {
       slope->push_back(attr->slope()->data()[i]);
     }
-    auto val_offset = schema::CreateCaffePReLUDirect(fbb, attr->channelShared(), slope.release());
-    auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_CaffePReLU, val_offset.o);
+    auto val_offset = schema::CreatePReLUDirect(fbb, attr->channelShared(), slope.release());
+    auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_PReLU, val_offset.o);
     fbb.Finish(prim_offset);
     auto buf = fbb.GetBufferPointer();
@@ -70,4 +70,4 @@ class CaffePReLU : public Activation {
 }  // namespace lite
 }  // namespace mindspore
-#endif  // LITE_MINDSPORE_LITE_C_OPS_CAFFE_P_RE_L_U_H_
+#endif  // LITE_MINDSPORE_LITE_C_OPS_P_RELU_H_
diff --git a/mindspore/lite/src/ops/prelu.cc b/mindspore/lite/src/ops/prelu.cc
deleted file mode 100644
index 23219dd053fdd4a900763ee52885cb6ff8355fd3..0000000000000000000000000000000000000000
--- a/mindspore/lite/src/ops/prelu.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/prelu.h"
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-std::vector<float> Prelu::GetSlope() const { return this->primitive_->value.AsPrelu()->slope; }
-
-void Prelu::SetSlope(const std::vector<float> &slope) { this->primitive_->value.AsPrelu()->slope = slope; }
-
-#else
-
-std::vector<float> Prelu::GetSlope() const {
-  auto fb_vector = this->primitive_->value_as_Prelu()->slope();
-  return std::vector<float>(fb_vector->begin(), fb_vector->end());
-}
-
-#endif
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/prelu.h b/mindspore/lite/src/ops/prelu.h
deleted file mode 100644
index 4a5fa427185f038c4bd007a1d5f72c223c2ed9aa..0000000000000000000000000000000000000000
--- a/mindspore/lite/src/ops/prelu.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_PRELU_H_
-#define LITE_MINDSPORE_LITE_C_OPS_PRELU_H_
-
-#include
-#include
-#include
-#include
-#include "ir/dtype/type_id.h"
-#include "src/ops/activation.h"
-
-namespace mindspore {
-namespace lite {
-class Prelu : public Activation {
- public:
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(Prelu, PrimitiveC);
-  Prelu() = default;
-  explicit Prelu(schema::PrimitiveT *primitive) : Activation(primitive) {}
-  void SetSlope(const std::vector<float> &slope);
-
-#else
-  explicit Prelu(schema::Primitive *primitive) : Activation(primitive) {}
-
-  schema::Primitive *Init(schema::Primitive *primitive) {
-    flatbuffers::FlatBufferBuilder fbb(1024);
-
-    auto attr = primitive->value_as_Prelu();
-    MS_ASSERT(attr != nullptr);
-
-    auto slope = std::make_unique<std::vector<float>>();
-    for (int i = 0; i < static_cast<int>(attr->slope()->size()); i++) {
-      slope->push_back(attr->slope()->data()[i]);
-    }
-
-    auto val_offset = schema::CreatePreluDirect(fbb, slope.release());
-    auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Prelu, val_offset.o);
-    fbb.Finish(prim_offset);
-
-    auto buf = fbb.GetBufferPointer();
-    MS_ASSERT(buf != nullptr);
-    auto buf_bak = new char[fbb.GetSize()];
-    memcpy(buf_bak, buf, fbb.GetSize());
-
-    auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
-    auto prim = const_cast<schema::Primitive *>(root);
-
-    delete[] buf_bak;
-    fbb.Clear();
-    return prim;
-  }
-#endif
-  std::vector<float> GetSlope() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-#endif  // LITE_MINDSPORE_LITE_C_OPS_PRELU_H_
diff --git a/mindspore/lite/src/ops/primitive_c.cc b/mindspore/lite/src/ops/primitive_c.cc
index b3f8100fd06be3e8cec853009c66fbc02f5a91c0..43a1ab53eeef900c5a59f77581beff16c17ab3ab 100644
--- a/mindspore/lite/src/ops/primitive_c.cc
+++ b/mindspore/lite/src/ops/primitive_c.cc
@@ -72,8 +72,8 @@
 #include "src/ops/gather_nd.h"
 #include "src/ops/local_response_normalization.h"
 #include "src/ops/pad.h"
-#include "src/ops/prelu.h"
-#include "src/ops/caffe_p_relu.h"
+#include "src/ops/p_relu.h"
+#include "src/ops/leaky_relu.h"
 #include "src/ops/reverse_sequence.h"
 #include "src/ops/dedepthwise_conv2d.h"
 #include "src/ops/depthwise_conv2d.h"
"src/ops/depthwise_conv2d.h" @@ -346,10 +346,10 @@ PrimitiveC *PrimitiveC::UnPackFromSchemaPrimitiveT(mindspore::schema::PrimitiveT return new Minimum(primitive); case schema::PrimitiveType_StridedSlice: return new StridedSlice(primitive); - case schema::PrimitiveType_Prelu: - return new Prelu(primitive); - case schema::PrimitiveType_CaffePReLU: - return new CaffePReLU(primitive); + case schema::PrimitiveType_LeakyReLU: + return new (std::nothrow) LeakyReLU(primitive); + case schema::PrimitiveType_PReLU: + return new (std::nothrow) PReLU(primitive); case schema::PrimitiveType_Round: return new Round(primitive); case schema::PrimitiveType_Reverse: @@ -554,10 +554,10 @@ PrimitiveC *PrimitiveC::UnPackFromSchemaPrimitive(mindspore::schema::Primitive * return new Minimum(const_cast(primitive)); case schema::PrimitiveType_StridedSlice: return new StridedSlice(const_cast(primitive)); - case schema::PrimitiveType_Prelu: - return new Prelu(const_cast(primitive)); - case schema::PrimitiveType_CaffePReLU: - return new CaffePReLU(const_cast(primitive)); + case schema::PrimitiveType_LeakyReLU: + return new (std::nothrow) LeakyReLU(const_cast(primitive)); + case schema::PrimitiveType_PReLU: + return new (std::nothrow) PReLU(const_cast(primitive)); case schema::PrimitiveType_Round: return new Round(const_cast(primitive)); case schema::PrimitiveType_Reverse: diff --git a/mindspore/lite/src/populate_parameter.cc b/mindspore/lite/src/populate_parameter.cc index 31c43fe55ce7ef0f65a493f2fe7c6de1275d6448..4c9290b9243cb61727a13369f802a8ff9c415a68 100644 --- a/mindspore/lite/src/populate_parameter.cc +++ b/mindspore/lite/src/populate_parameter.cc @@ -75,8 +75,8 @@ #include "src/ops/gather_nd.h" #include "src/ops/local_response_normalization.h" #include "src/ops/pad.h" -#include "src/ops/prelu.h" -#include "src/ops/caffe_p_relu.h" +#include "src/ops/leaky_relu.h" +#include "src/ops/p_relu.h" #include "src/ops/reverse_sequence.h" #include "src/ops/dedepthwise_conv2d.h" #include "src/ops/depthwise_conv2d.h" @@ -233,7 +233,7 @@ OpParameter *PopulateExpandDimsParameter(const mindspore::lite::PrimitiveC *prim } OpParameter *PopulatePReLUParameter(const mindspore::lite::PrimitiveC *primitive) { - auto param = dynamic_cast(primitive); + auto param = dynamic_cast(primitive); PReluParameter *prelu_param = reinterpret_cast(malloc(sizeof(PReluParameter))); if (prelu_param == nullptr) { MS_LOG(ERROR) << "malloc PReluParameter failed."; @@ -246,7 +246,7 @@ OpParameter *PopulatePReLUParameter(const mindspore::lite::PrimitiveC *primitive } OpParameter *PopulateLeakyReluParameter(const mindspore::lite::PrimitiveC *primitive) { - auto param = dynamic_cast(primitive); + auto param = dynamic_cast(primitive); LeakyReluParameter *leaky_relu_param = reinterpret_cast(malloc(sizeof(LeakyReluParameter))); if (leaky_relu_param == nullptr) { MS_LOG(ERROR) << "malloc LeakyReluParameter failed."; @@ -254,17 +254,14 @@ OpParameter *PopulateLeakyReluParameter(const mindspore::lite::PrimitiveC *primi } memset(leaky_relu_param, 0, sizeof(LeakyReluParameter)); leaky_relu_param->op_parameter_.type_ = primitive->Type(); - auto temp = param->GetSlope(); - leaky_relu_param->slope_ = reinterpret_cast(malloc(temp.size() * sizeof(float))); + leaky_relu_param->slope_ = reinterpret_cast(malloc(sizeof(float))); if (leaky_relu_param->slope_ == nullptr) { MS_LOG(ERROR) << "malloc relu slope fail!"; free(leaky_relu_param); return nullptr; } - for (size_t i = 0; i < temp.size(); i++) { - leaky_relu_param->slope_[i] = temp[i]; - } - leaky_relu_param->slope_num_ 
+  leaky_relu_param->slope_[0] = param->GetNegativeSlope();
+  leaky_relu_param->slope_num_ = 1;
   return reinterpret_cast<OpParameter *>(leaky_relu_param);
 }
@@ -1598,8 +1595,8 @@ PopulateParameterRegistry::PopulateParameterRegistry() {
   populate_parameter_funcs_[schema::PrimitiveType_ScatterND] = PopulateScatterNDParameter;
   populate_parameter_funcs_[schema::PrimitiveType_Squeeze] = PopulateSqueezeParameter;
   populate_parameter_funcs_[schema::PrimitiveType_Split] = PopulateSplitParameter;
-  populate_parameter_funcs_[schema::PrimitiveType_CaffePReLU] = PopulatePReLUParameter;
-  populate_parameter_funcs_[schema::PrimitiveType_Prelu] = PopulateLeakyReluParameter;
+  populate_parameter_funcs_[schema::PrimitiveType_PReLU] = PopulatePReLUParameter;
+  populate_parameter_funcs_[schema::PrimitiveType_LeakyReLU] = PopulateLeakyReluParameter;
   populate_parameter_funcs_[schema::PrimitiveType_PriorBox] = PopulatePriorBoxParameter;
   populate_parameter_funcs_[schema::PrimitiveType_QuantDTypeCast] = PopulateQuantDTypeCastParameter;
   populate_parameter_funcs_[schema::PrimitiveType_Lstm] = PopulateLstmParameter;
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc
index 5c9311308ca6acaa041bd91def3a545a0608423d..a13739bc6dccc80cd49123a2dff5e55738dc929f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/leaky_relu_base.cc
@@ -29,7 +29,7 @@
 using mindspore::schema::PrimitiveType_LeakyReLU;
 namespace mindspore::kernel {
 int LeakyReluBaseCPUKernel::Init() { return RET_OK; }
-kernel::LiteKernel *CpuPreluInt8KernelCreator(const std::vector &inputs,
+kernel::LiteKernel *CpuLeakyReluInt8KernelCreator(const std::vector &inputs,
                                               const std::vector &outputs,
                                               OpParameter *opParameter, const Context *ctx,
                                               const kernel::KernelKey &desc,
@@ -41,7 +41,7 @@ kernel::LiteKernel *CpuPreluInt8KernelCreator(const std::vector
@@ -54,5 +54,5 @@ kernel::LiteKernel *CpuPreluInt8KernelCreator(const std::vector
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.cc
-  context_->allocator->Free(indices_data_);
+  if (indices_data_ != nullptr) {
+    free(indices_data_);
+    indices_data_ = nullptr;
+  }
 }
 int GatherCPUKernel::ReSize() { return RET_OK; }
@@ -102,7 +105,7 @@ int GatherCPUKernel::Run() {
   }
   auto indices_tensor = in_tensors_.at(1);
-  indices_data_ = reinterpret_cast<int *>(context_->allocator->Malloc(indices_tensor->Size()));
+  indices_data_ = reinterpret_cast<int *>(malloc(indices_tensor->Size()));
   if (indices_data_ == nullptr) {
     MS_LOG(ERROR) << "Memory allocation failed";
     return RET_ERROR;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h
index b492c4c179b5a2f6fd0b5dcb1773aa359d049423..334d93a648425b7a7bdd6245040ad2ddbccab5a5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gather.h
@@ -36,7 +36,7 @@ class GatherCPUKernel : public LiteKernel {
   int DoGather(int task_id);
  private:
-  int *indices_data_;
+  int *indices_data_ = nullptr;
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.cc
index ec2a4cbc26f66edbcb5480bdc06971368e8ed1a4..ab8b01b598d211ff3213740dc33b7241a207a96b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/leaky_relu.cc
@@ -26,7 +26,6 @@
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_LeakyReLU;
-using mindspore::schema::PrimitiveType_Prelu;
 namespace mindspore::kernel {
 namespace {
@@ -100,5 +99,4 @@ kernel::LiteKernel *CpuLeakyReluFp32KernelCreator(const std::vector
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
--- a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
+namespace {
+int LeakyReluInt8Run(void *cdata, int task_id) {
+  auto relu = reinterpret_cast<LeakyReluInt8CPUKernel *>(cdata);
+  relu->DoExecute(task_id);
+  return RET_OK;
+}
+}  // namespace
+
 int LeakyReluInt8CPUKernel::Init() {
   LeakyReluBaseCPUKernel::Init();
   LeakyReluParameter *param = reinterpret_cast<LeakyReluParameter *>(op_parameter_);
@@ -82,17 +93,12 @@ int LeakyReluInt8CPUKernel::Run() {
     MS_LOG(ERROR) << "Prepare fail!ret: " << ret;
     return ret;
   }
-  ret = ParallelLaunch(THREAD_POOL_DEFAULT, PreluInt8Run, this, op_parameter_->thread_num_);
+  ret = ParallelLaunch(THREAD_POOL_DEFAULT, LeakyReluInt8Run, this, op_parameter_->thread_num_);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "RunPreluParam failed. errorcode: ";
   }
   return RET_OK;
 }
-int PreluInt8Run(void *cdata, int task_id) {
-  auto prelu = reinterpret_cast<LeakyReluInt8CPUKernel *>(cdata);
-  prelu->DoExecute(task_id);
-  return RET_OK;
-}
 int LeakyReluInt8CPUKernel::DoExecute(int task_id) {
   auto input_tensor = in_tensors_.at(kInputIndex);
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h
index c995f7313d45f2b595901d13ae1eb85bcebe3169..ba0282a096ef1aa8c2b27caeba161f58da22439e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h
@@ -41,7 +41,6 @@ class LeakyReluInt8CPUKernel : public LeakyReluBaseCPUKernel {
  private:
   LeakyReluQuantArg quant_prelu_parm_;
 };
-int PreluInt8Run(void *cdata, int task_id);
 }  // namespace mindspore::kernel
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_PRELU_INT8_H_
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc
index 9f398a958e6f4cac8b480a831a1659dec3bc4f11..78f7df0baf86ec13eb47f79633b413e053e162b3 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc
@@ -29,7 +29,7 @@
 using mindspore::kernel::KERNEL_ARCH::kGPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_Prelu;
+using mindspore::schema::PrimitiveType_PReLU;
 namespace mindspore::kernel {
@@ -154,5 +154,5 @@ kernel::LiteKernel *OpenCLPReluKernelCreator(const std::vector
   op_param.slope_[0] = 0.25;
   lite::Context *ctx = new lite::Context;
   ctx->thread_num_ = 2;
   op_param.axis_ = 0.25;
-  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Prelu};
+  kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_LeakyReLU};
   auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
   ASSERT_NE(creator, nullptr);
   kernel::LiteKernel *kernel =
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc
index 84ec400efc545f3d46399d6cc6bdc95a18205ea0..7505ff4c9a97249081f78020c01af6a6a4d92334 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc
@@ -119,15 +119,6 @@ TEST_F(TestTfliteParserPrelu, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Prelu) << "wrong Op Type";
-}
-
-TEST_F(TestTfliteParserPrelu, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsPrelu(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value;
-  std::vector<float> slope(20, 0);
-  ASSERT_EQ(val.AsPrelu()->slope, slope);
-  ASSERT_EQ(val.type, schema::PrimitiveType_Prelu);
 }
 class TestTfliteParserLeakyRelu : public TestTfliteParser {
diff --git a/mindspore/lite/tools/common/node_util.cc b/mindspore/lite/tools/common/node_util.cc
index f4efe2dc5eb737c1ead5bfde86fae4c2170dc9b8..e112b307331cb698ce726b5154401de173a5a145 100644
--- a/mindspore/lite/tools/common/node_util.cc
+++ b/mindspore/lite/tools/common/node_util.cc
@@ -29,7 +29,7 @@ static const std::vector<schema::PrimitiveType> nhwcOpList = {
   schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_DeDepthwiseConv2D,
   schema::PrimitiveType_Pooling, schema::PrimitiveType_Resize,
   schema::PrimitiveType_BatchNorm, schema::PrimitiveType_FusedBatchNorm,
-  schema::PrimitiveType_CaffePReLU};
+  schema::PrimitiveType_PReLU};
 static const std::vector<schema::PrimitiveType> fp32FullOpList = {
   schema::PrimitiveType_Concat, schema::PrimitiveType_Add,
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc
index 58e8c31749452d52cd995558d0352132bff95a78..2c1b11cc82e464b11aa581c6935bf3d2b9b43394 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc
@@ -34,7 +34,7 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto,
     return RET_NULL_PTR;
   }
-  std::unique_ptr<schema::CaffePReLUT> attr = std::make_unique<schema::CaffePReLUT>();
+  std::unique_ptr<schema::PReLUT> attr = std::make_unique<schema::PReLUT>();
   if (attr == nullptr) {
     MS_LOG(ERROR) << "new op failed";
     return RET_NULL_PTR;
@@ -60,7 +60,7 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto,
   weightVec->push_back(slope);
   op->name = proto.name();
-  op->primitive->value.type = schema::PrimitiveType_CaffePReLU;
+  op->primitive->value.type = schema::PrimitiveType_PReLU;
   op->primitive->value.value = attr.release();
   return RET_OK;
 }
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc
index d14a5054a749d7c6876c81be024aa08e72a3ca93..06f5d460f54fa50215fe1841f4f1c01ba449808f 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc
@@ -73,7 +73,7 @@ STATUS OnnxPReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No
     MS_LOG(ERROR) << "input num should be 2";
     return RET_ERROR;
   }
-  std::unique_ptr<schema::CaffePReLUT> attr = std::make_unique<schema::CaffePReLUT>();
+  std::unique_ptr<schema::PReLUT> attr = std::make_unique<schema::PReLUT>();
   std::vector params;
   const auto &input_name = onnx_node.input(1);
   for (const auto &it : onnx_graph.initializer()) {
@@ -102,7 +102,7 @@ STATUS OnnxPReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No
     }
   }
-  op->primitive->value.type = schema::PrimitiveType_CaffePReLU;
+  op->primitive->value.type = schema::PrimitiveType_PReLU;
   op->primitive->value.value = attr.release();
   return RET_OK;
 }
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc
index c79caf7344241e95e3d4add041c246554312e328..5a4adeec4db6a41dadd3b323c88a5ded34c4086b 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc
@@ -84,52 +84,11 @@ STATUS TfliteActivationParser::Parse(const std::unique_ptr &t
   return RET_OK;
 }
-STATUS TflitePreluParser::Parse(const std::unique_ptr &tflite_op,
-                                const std::vector> &tflite_tensors,
-                                const std::vector> &tflite_model_buffer,
-                                schema::CNodeT *op,
-                                std::vector *tensors_id,
-                                std::vector *tensors_format,
-                                std::map *tensors_id_map) {
-  MS_LOG(DEBUG) << "parse TflitePreluParser";
-
-  if (op == nullptr) {
-    MS_LOG(ERROR) << "op is null";
-    return RET_NULL_PTR;
-  }
-  op->primitive = std::make_unique();
-  if (op->primitive == nullptr) {
-    MS_LOG(ERROR) << "op->primitive is null";
-    return RET_NULL_PTR;
-  }
-
-  std::unique_ptr attr = std::make_unique();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new op failed";
-    return RET_NULL_PTR;
-  }
-
-  if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->slope)) {
-    MS_LOG(ERROR) << "get pRelu -> slope failed";
-    return RET_ERROR;
-  }
-  op->primitive->value.type = schema::PrimitiveType_Prelu;
-  op->primitive->value.value = attr.release();
-
-  AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
-             tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
-  AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
-              tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
-  return RET_OK;
-}
-
-
 TfliteNodeRegister g_TfliteReluParser("Relu", new TfliteReluParser());
 TfliteNodeRegister g_TfliteRelu6Parser("Relu6", new TfliteRelu6Parser());
 TfliteNodeRegister g_TfliteTanhParser("Tanh", new TfliteTanhParser());
 TfliteNodeRegister g_TfliteHardSwishParser("HardSwish", new TfliteHardSwishParser());
 TfliteNodeRegister g_tfliteLogisticParser("Logistic", new TfliteLogisticParser());
-TfliteNodeRegister g_tflitePreluParser("Prelu", new TflitePreluParser());
 TfliteNodeRegister g_TfliteLeakyReluParser("LeakyRelu", new TfliteLeakyReluParser());
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h
index a47b0e1180897f13d018dc1b6e50fd7c69e3d29e..b1509599d93380fa309698fb607f9a3d70f32960 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h
@@ -68,18 +68,6 @@ class TfliteLeakyReluParser : public TfliteActivationParser {
   TfliteLeakyReluParser() : TfliteActivationParser() {}
 };
-class TflitePreluParser : public TfliteNodeParser {
- public:
-  TflitePreluParser() : TfliteNodeParser("Prelu") {}
-
-  STATUS Parse(const std::unique_ptr &tflite_op,
-               const std::vector> &tflite_tensors,
-               const std::vector> &tflite_model_buffer,
-               schema::CNodeT *op,
-               std::vector *tensors_id,
-               std::vector *tensors_format,
-               std::map *tensors_id_map) override;
-};
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
index eb9b247a1e8a1a4c6db87ed00c4165b1ed47b1a8..d987b6e8f97e4571e4b7f475d47ce4d5decfcc75 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
@@ -107,7 +107,6 @@ std::map<tflite::BuiltinOperator, std::string> tfMsOpTypeMap{
     {tflite::BuiltinOperator_DEPTH_TO_SPACE, "DepthToSpace"},
     {tflite::BuiltinOperator_SPACE_TO_BATCH_ND, "SpaceToBatchND"},
     {tflite::BuiltinOperator_SPACE_TO_DEPTH, "SpaceToDepth"},
-    {tflite::BuiltinOperator_PRELU, "Prelu"},
     {tflite::BuiltinOperator_ROUND, "Round"},
     {tflite::BuiltinOperator_WHERE, "Where"},
     {tflite::BuiltinOperator_SPARSE_TO_DENSE, "SparseToDense"},
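Reviewer note (illustrative sketch, not part of the patch): the rename also separates two activations that this change untangles in populate_parameter.cc. PReLU carries a learned slope per channel (collapsed to a single value when channelShared is set), while LeakyReLU uses one scalar slope for every element, which is why PopulateLeakyReluParameter now allocates exactly one float and reads GetNegativeSlope(). A minimal element-wise sketch, assuming float data; the helper names below are hypothetical and only illustrate the semantics:

  #include <vector>

  // PReLU: negative values are scaled by the slope of their channel.
  inline float PReluElement(float x, const std::vector<float> &slope, int channel) {
    return x >= 0.0f ? x : slope[channel] * x;
  }

  // LeakyReLU: one scalar slope shared by all elements and channels.
  inline float LeakyReluElement(float x, float slope) {
    return x >= 0.0f ? x : slope * x;
  }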