Commit e540b88e authored by mindspore-ci-bot, committed by Gitee

!5703 quantize parser and l2_norm op

Merge pull request !5703 from zhaodezan/master
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nnacl/l2_norm.h"
#include <math.h>
int L2NormFp32(const float *input_ptr, float *output_ptr,
               L2NormParameter *param) {
  int *axis = param->axis_;
  size_t axis_num = param->axis_num_;
  float epsilon = param->epsilon_;
  int shape_num = param->shape_num_;
  // Only the default case is supported: the axis list names every dimension in
  // order, which reduces to normalizing the whole tensor by its global L2 norm.
  if (shape_num != (int)axis_num) {
    return -1;
  }
  bool default_case_flag = true;
  for (size_t i = 0; i < axis_num; i++) {
    if (axis[i] != (int)i) {
      default_case_flag = false;
      break;
    }
  }
  if (!default_case_flag) {
    return -1;
  }
  int data_num = param->data_num_;
  float sum = 0;
  for (int i = 0; i < data_num; i++) {
    sum += input_ptr[i] * input_ptr[i];
  }
  // Clamp the squared norm from below by epsilon to avoid division by zero.
  float res = sqrtf(sum > epsilon ? sum : epsilon);
  for (int i = 0; i < data_num; i++) {
    output_ptr[i] = input_ptr[i] / res;
  }
  return 0;
}
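For reference, a minimal usage sketch (not part of this commit) of the only case L2NormFp32 currently handles: the axis list names every dimension in order, so each element is divided by sqrt(max(sum of squares, epsilon)). The buffer sizes and values below are made up for illustration.

#include <stdio.h>
#include "nnacl/l2_norm.h"

int main(void) {
  // A flat 1 x 4 tensor; the sum of squares is 25, so each element is divided by 5.
  float in[4] = {1.0f, 2.0f, 2.0f, 4.0f};
  float out[4] = {0.0f};
  int axis[1] = {0};
  int shape[1] = {4};
  L2NormParameter param = {0};
  param.axis_ = axis;
  param.axis_num_ = 1;
  param.shape_ = shape;
  param.shape_num_ = 1;
  param.data_num_ = 4;
  param.epsilon_ = 1e-6f;
  if (L2NormFp32(in, out, &param) == 0) {
    for (int i = 0; i < 4; i++) {
      printf("%f\n", out[i]);  // 0.2, 0.4, 0.4, 0.8
    }
  }
  return 0;
}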
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_NNACL_L2NORM_H_
#define MINDSPORE_LITE_NNACL_L2NORM_H_
#include "nnacl/l2_norm_parameter.h"
#ifdef __cplusplus
extern "C" {
#endif
int L2NormFp32(const float *input_ptr, float *output_ptr,
L2NormParameter *param);
#ifdef __cplusplus
}
#endif
#endif // MINDSPORE_LITE_NNACL_L2NORM_H_
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_
#define MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_
#include "nnacl/op_base.h"
typedef struct L2NormParameter {
  OpParameter op_parameter_;
  int *axis_;
  size_t axis_num_;
  float epsilon_;
  int data_num_;
  int *shape_;
  int shape_num_;
} L2NormParameter;
#endif // MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_
@@ -112,6 +112,7 @@
#include "src/ops/ceil.h"
#include "src/ops/round.h"
#include "src/ops/sparse_to_dense.h"
#include "src/ops/l2_norm.h"
#include "nnacl/op_base.h"
#include "nnacl/fp32/arg_min_max.h"
#include "nnacl/fp32/cast.h"
@@ -169,6 +170,7 @@
#include "nnacl/fp32/elu.h"
#include "nnacl/leaky_relu_parameter.h"
#include "nnacl/sparse_to_dense.h"
#include "nnacl/l2_norm_parameter.h"
namespace mindspore::kernel {
@@ -1513,6 +1515,33 @@ OpParameter *PopulateEluParameter(const mindspore::lite::PrimitiveC *primitive)
return reinterpret_cast<OpParameter *>(elu_parameter);
}
OpParameter *PopulateL2NormParameter(
    const mindspore::lite::PrimitiveC *primitive) {
  L2NormParameter *l2_norm_parameter =
      reinterpret_cast<L2NormParameter *>(malloc(sizeof(L2NormParameter)));
  if (l2_norm_parameter == nullptr) {
    MS_LOG(ERROR) << "malloc L2NormParameter failed.";
    return nullptr;
  }
  memset(l2_norm_parameter, 0, sizeof(L2NormParameter));
  l2_norm_parameter->op_parameter_.type_ = primitive->Type();
  auto param = reinterpret_cast<mindspore::lite::L2Norm *>(
      const_cast<mindspore::lite::PrimitiveC *>(primitive));
  auto axis_vec = param->GetAxis();
  l2_norm_parameter->axis_num_ = axis_vec.size();
  l2_norm_parameter->axis_ =
      reinterpret_cast<int *>(malloc(axis_vec.size() * sizeof(int)));
  if (l2_norm_parameter->axis_ == nullptr) {
    MS_LOG(ERROR) << "malloc axis failed.";
    free(l2_norm_parameter);
    return nullptr;
  }
  for (size_t i = 0; i < axis_vec.size(); i++) {
    l2_norm_parameter->axis_[i] = axis_vec[i];
  }
  // Clamp epsilon from below so the kernel never divides by zero.
  if (param->GetEpsilon() < 1e-12) {
    l2_norm_parameter->epsilon_ = 1e-12;
  } else {
    l2_norm_parameter->epsilon_ = param->GetEpsilon();
  }
  return reinterpret_cast<OpParameter *>(l2_norm_parameter);
}
PopulateParameterRegistry::PopulateParameterRegistry() {
populate_parameter_funcs_[schema::PrimitiveType_SparseToDense] = PopulateSparseToDenseParameter;
populate_parameter_funcs_[schema::PrimitiveType_SoftMax] = PopulateSoftmaxParameter;
@@ -1610,6 +1639,7 @@ PopulateParameterRegistry::PopulateParameterRegistry() {
populate_parameter_funcs_[schema::PrimitiveType_Lstm] = PopulateLstmParameter;
populate_parameter_funcs_[schema::PrimitiveType_EmbeddingLookup] = PopulateEmbeddingLookupParameter;
populate_parameter_funcs_[schema::PrimitiveType_Elu] = PopulateEluParameter;
populate_parameter_funcs_[schema::PrimitiveType_L2Norm] = PopulateL2NormParameter;
}
PopulateParameterRegistry *PopulateParameterRegistry::GetInstance() {
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include "src/runtime/kernel/arm/fp32/l2_norm.h"
#include "include/errorcode.h"
#include "nnacl/l2_norm.h"
using mindspore::kernel::KERNEL_ARCH::kCPU;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_L2Norm;
namespace mindspore::kernel {
int L2NormCPUKernel::Init() {
  l2_norm_param_->data_num_ = static_cast<int>(in_tensors_.at(kInputIndex)->DataSize());
  auto shape = in_tensors_.at(kInputIndex)->shape();
  l2_norm_param_->shape_ = reinterpret_cast<int *>(malloc(shape.size() * sizeof(int)));
  if (l2_norm_param_->shape_ == nullptr) {
    MS_LOG(ERROR) << "malloc shape failed.";
    return RET_ERROR;
  }
  l2_norm_param_->shape_num_ = shape.size();
  for (size_t i = 0; i < shape.size(); i++) {
    l2_norm_param_->shape_[i] = shape[i];
  }
  return RET_OK;
}
kernel::LiteKernel *
CpuL2NormFp32KernelCreator(const std::vector<lite::tensor::Tensor *> &inputs,
                           const std::vector<lite::tensor::Tensor *> &outputs,
                           OpParameter *param, const lite::Context *ctx,
                           const kernel::KernelKey &desc,
                           const mindspore::lite::PrimitiveC *primitive) {
  if (param == nullptr) {
    MS_LOG(ERROR) << "input param is nullptr!";
    return nullptr;
  }
  MS_ASSERT(desc.type == schema::PrimitiveType_L2Norm);
  auto *kernel = new (std::nothrow)
      L2NormCPUKernel(param, inputs, outputs, ctx, primitive);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new L2NormCPUKernel failed!";
    return nullptr;
  }
  auto ret = kernel->Init();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << param->name_ << ", type: "
                  << schema::EnumNamePrimitiveType(
                         static_cast<schema::PrimitiveType>(param->type_));
    delete kernel;
    return nullptr;
  }
  return kernel;
}
int L2NormCPUKernel::Run() {
  auto ret = Prepare();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Prepare failed! ret: " << ret;
    return ret;
  }
  auto input_ptr =
      reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->Data());
  auto output_ptr =
      reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->Data());
  ret = L2NormFp32(input_ptr, output_ptr, l2_norm_param_);
  if (ret != 0) {
    MS_LOG(ERROR) << "unsupported axis setting; only the default all-axis case is implemented";
    return ret;
  }
  return RET_OK;
}
L2NormCPUKernel::~L2NormCPUKernel() {
  if (l2_norm_param_->shape_ != nullptr) {
    free(l2_norm_param_->shape_);
  }
  if (l2_norm_param_->axis_ != nullptr) {
    free(l2_norm_param_->axis_);
  }
}
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_L2Norm,
CpuL2NormFp32KernelCreator)
} // namespace mindspore::kernel
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_L2_NORM_H_
#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_L2_NORM_H_
#include <vector>
#include "include/context.h"
#include "src/lite_kernel.h"
#include "nnacl/l2_norm_parameter.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
using mindspore::lite::Context;
namespace mindspore::kernel {
class L2NormCPUKernel : public LiteKernel {
 public:
  L2NormCPUKernel(OpParameter *parameter,
                  const std::vector<lite::tensor::Tensor *> &inputs,
                  const std::vector<lite::tensor::Tensor *> &outputs,
                  const Context *ctx,
                  const mindspore::lite::PrimitiveC *primitive)
      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
    l2_norm_param_ = reinterpret_cast<L2NormParameter *>(op_parameter_);
  }
  ~L2NormCPUKernel();

  int Init() override;
  int ReSize() override { return 0; }
  int Run() override;

 private:
  L2NormParameter *l2_norm_param_;
};
} // namespace mindspore::kernel
#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_L2_NORM_H_
@@ -103,3 +103,5 @@ mnasnet_1.0_160_1_metadata_1.tflite
mnasnet_1.0_192_1_metadata_1.tflite
mnasnet_1.0_224_1_metadata_1.tflite
mnasnet_1.0_96_1_metadata_1.tflite
lite-model_on_device_vision_classifier_popular_us_products_V1_1.tflite
lite-model_on_device_vision_classifier_popular_wine_V1_1.tflite
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/converter/parser/tflite/tflite_quantize_parser.h"
#include <vector>
#include <memory>
#include <map>
namespace mindspore {
namespace lite {
STATUS TfliteQuantizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
                                   const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
                                   const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
                                   schema::CNodeT *op,
                                   std::vector<int32_t> *tensors_id,
                                   std::vector<schema::Format> *tensors_format,
                                   std::map<int, int> *tensors_id_map) {
  MS_LOG(DEBUG) << "parse TfliteQuantizeParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  if (op->primitive == nullptr) {
    MS_LOG(ERROR) << "op->primitive is null";
    return RET_NULL_PTR;
  }

  std::unique_ptr<schema::QuantDTypeCastT> attr = std::make_unique<schema::QuantDTypeCastT>();
  if (attr == nullptr) {
    MS_LOG(ERROR) << "new QuantDTypeCastT attr failed";
    return RET_NULL_PTR;
  }

  // The source and destination data types of the cast are taken directly from
  // the QUANTIZE node's input and output tensors.
  const auto &in_tensor = tflite_tensors[tflite_op->inputs[0]];
  if (in_tensor == nullptr) {
    MS_LOG(ERROR) << "input tensor is null";
    return RET_NULL_PTR;
  }
  attr->srcT = GetTfliteDataType(in_tensor->type);
  const auto &out_tensor = tflite_tensors[tflite_op->outputs[0]];
  if (out_tensor == nullptr) {
    MS_LOG(ERROR) << "output tensor is null";
    return RET_NULL_PTR;
  }
  attr->dstT = GetTfliteDataType(out_tensor->type);

  op->primitive->value.type = schema::PrimitiveType_QuantDTypeCast;
  op->primitive->value.value = attr.release();

  AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
             tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
  AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
              tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
  return RET_OK;
}
TfliteNodeRegister g_tfliteQuantizeParser("QUANTIZE", new TfliteQuantizeParser());
} // namespace lite
} // namespace mindspore
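As a side note on what the mapping above stands for: QuantDTypeCast converts between a quantized integer tensor and float32 using the tensor's scale and zero point (the standard affine scheme TFLite uses). The following self-contained sketch shows only that numeric relationship, with made-up quantization parameters; it is illustrative and is not the runtime kernel from this commit.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  // Hypothetical quantization parameters, for illustration only.
  const float scale = 0.5f;
  const int zero_point = 3;
  const int8_t quantized[4] = {3, 5, 7, 11};
  float dequantized[4];
  for (int i = 0; i < 4; i++) {
    // real_value = scale * (quantized_value - zero_point)
    dequantized[i] = scale * (float)(quantized[i] - zero_point);
  }
  for (int i = 0; i < 4; i++) {
    printf("%f\n", dequantized[i]);  // 0.0, 1.0, 2.0, 4.0
  }
  return 0;
}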
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_QUANTIZE_PARSER_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_QUANTIZE_PARSER_H
#include <vector>
#include <memory>
#include <map>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
class TfliteQuantizeParser : public TfliteNodeParser {
 public:
  TfliteQuantizeParser() : TfliteNodeParser("Quantize") {}

  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer,
               schema::CNodeT *op,
               std::vector<int32_t> *tensors_id,
               std::vector<schema::Format> *tensors_format,
               std::map<int, int> *tensors_id_map) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_QUANTIZE_PARSER_H