提交 521e351d 编写于 作者: M mindspore-ci-bot 提交者: Gitee

!5593 fix some mistakes reported by codex

Merge pull request !5593 from liuwenhao/master
......@@ -279,7 +279,7 @@ void ConvSWFp16(const float16_t *input_data, const float16_t *packed_weight, con
bool relu6 = conv_param->act_type_ == ActType_Relu6;
int oc4_res = conv_param->output_channel_ % C4NUM;
const float16_t *src = input_data;
float16_t *dst;
float16_t *dst = NULL;
if (oc4_res == 0) {
dst = output_data;
} else {
......
......@@ -17,8 +17,14 @@
#include "nnacl/fp32/common_func.h"
void PostConvFuncComm(const float *src_ptr_, float *out_ptr, const float *bias_ptr, size_t output_channel,
size_t plane_size, size_t stride, bool is_relu, bool is_relu6, int size) {
int oc_div = 0, oc_mod = 0;
for (int oc = 0; oc < output_channel; oc++) {
int oc_div = oc / size, oc_mod = oc % size;
if (size != 0) {
oc_div = oc / size;
oc_mod = oc % size;
} else {
return;
}
for (int hw = 0; hw < plane_size; hw++) {
int src_index = oc_div * size * plane_size + hw * size + oc_mod;
int dst_index = hw * stride + oc;
......
......@@ -35,7 +35,7 @@ int ROIPooling(float *in_ptr, float *out_ptr, float *roi, int tid, ROIPoolingPar
int scale = param->scale_;
int pooled_height = param->pooledH_;
int pooled_width = param->pooledW_;
int roi_stride = 5;
const int roi_stride = 5;
int roi_ind_st = roi_st * roi_stride;
float *max_c = malloc(channels_ * sizeof(float));
for (int i = roi_st; i < roi_end; ++i) {
......
......@@ -17,6 +17,9 @@
#include "nnacl/int8/leaky_relu_int8.h"
void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
if (quant_prelu_parm == NULL) {
return;
}
float output_scale = quant_prelu_parm->quant_arg.out_args_.scale_;
int output_zp = quant_prelu_parm->quant_arg.out_args_.zp_;
const float output_inverse_scale = 1.f / output_scale;
......
......@@ -328,7 +328,7 @@ void CalcWeightBiasSums(int8_t *weight, int row, int col, int input_zp, int weig
}
}
dst[c] = row * input_zp * weight_zp - input_zp * sum;
if (bias) {
if (bias != NULL) {
dst[c] += bias[c];
}
}
......
......@@ -218,8 +218,8 @@ void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::Primitive
}
void Conv2D::CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax) {
constexpr float qmin = 0;
constexpr float qmax = 255;
const float qmin = 0;
const float qmax = 255;
*mMin = static_cast<float>((qmin - mean) / stdDev);
*mMax = static_cast<float>((qmax - mean) / stdDev);
}
......
......@@ -70,8 +70,8 @@ void DepthwiseConv2D::SetActivationType(int activation_type) {
}
void DepthwiseConv2D::CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax) {
constexpr float qmin = 0;
constexpr float qmax = 255;
const float qmin = 0;
const float qmax = 255;
*mMin = static_cast<float>((qmin - mean) / stdDev);
*mMax = static_cast<float>((qmax - mean) / stdDev);
}
......
......@@ -31,8 +31,8 @@ void MatMul::SetTransposeA(bool transpose_a) { this->primitive_->value.AsMatMul(
void MatMul::SetTransposeB(bool transpose_b) { this->primitive_->value.AsMatMul()->transposeB = transpose_b; }
void MatMul::CalQuantParam(const double &mean, const double &stdDev, float *mMin, float *mMax) {
  // Maps the quantized uint8 bounds [0, 255] into the real-valued range via
  // real = (q - mean) / stdDev, writing the results to *mMin / *mMax.
  // NOTE(review): parameter names suggest mean/stddev normalization; the
  // formula itself is the only contract visible here. Assumes stdDev != 0 —
  // TODO confirm callers guarantee this. mMin/mMax must be non-null.
  // Quantized uint8 domain bounds; constexpr keeps them compile-time constants.
  constexpr float qmin = 0;
  constexpr float qmax = 255;
  *mMin = static_cast<float>((qmin - mean) / stdDev);
  *mMax = static_cast<float>((qmax - mean) / stdDev);
}
......
......@@ -119,8 +119,8 @@ int ToFormatOpenCLKernel::GetImageSize(size_t idx, std::vector<size_t> *img_size
im_dst_x = w * UP_DIV(c, C4NUM);
im_dst_y = h;
} else if (out_tensors_[0]->GetFormat() == schema::Format_NC4) {
int h = 1;
int w = 1;
const int h = 1;
const int w = 1;
int c = shapex[1];
im_dst_x = w * UP_DIV(c, C4NUM);
im_dst_y = h;
......
......@@ -109,7 +109,7 @@ int SubGraphOpenCLKernel::GenToFormatOp(const std::vector<lite::tensor::Tensor *
parameter->dst_format = dst_format;
parameter->out_mem_type = mem_type;
out_parameters->emplace_back(parameter);
LiteKernel *in_convert_op;
LiteKernel *in_convert_op = nullptr;
if (mem_type == OpenCLMemType::IMG) {
in_convert_op =
lite::GetOpenCLKernel({in_tensors[i]}, {new_tensor}, reinterpret_cast<OpParameter *>(parameter), nullptr, desc);
......
......@@ -198,7 +198,7 @@ int OpenCLRuntime::Init() {
MS_LOG(INFO) << "Compute Unit: " << compute_units_;
MS_LOG(INFO) << "Clock Frequency: " << max_freq_ << " MHz";
cl_command_queue_properties properties = 0;
const cl_command_queue_properties properties = 0;
#if MS_OPENCL_PROFILE
properties |= CL_QUEUE_PROFILING_ENABLE;
#endif
......
......@@ -402,6 +402,7 @@ STATUS OnnxModelParser::CopyOnnxTensorData(const onnx::TensorProto &onnx_const_v
data_size = data_count * sizeof(int32_t);
buffer = std::make_unique<int32_t[]>(data_count);
const int64_t *in_data;
in_data = nullptr;
if (onnx_const_value.int64_data_size() == 0) {
in_data = reinterpret_cast<const int64_t *>(onnx_const_value.raw_data().data());
} else {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册