Commit 46df711b authored by mindspore-ci-bot, committed by Gitee

!4832 remove all todos

Merge pull request !4832 from wangzhe/master
......@@ -263,7 +263,6 @@ OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primiti
pooling_param->global_ = pooling_primitive->GetGlobal();
pooling_param->window_w_ = pooling_primitive->GetWindowW();
pooling_param->window_h_ = pooling_primitive->GetWindowH();
-// todo format
auto pooling_lite_primitive = (lite::Pooling *)primitive;
MS_ASSERT(nullptr != pooling_lite_primitive);
pooling_param->pad_u_ = pooling_lite_primitive->PadUp();
......@@ -402,7 +401,6 @@ OpParameter *PopulateConvDwParameter(const mindspore::lite::PrimitiveC *primitiv
auto conv_primitive = dynamic_cast<const mindspore::lite::DepthwiseConv2D *>(primitive);
conv_param->kernel_h_ = conv_primitive->GetKernelH();
conv_param->kernel_w_ = conv_primitive->GetKernelW();
-// todo format, group
conv_param->stride_h_ = conv_primitive->GetStrideH();
conv_param->stride_w_ = conv_primitive->GetStrideW();
......
......@@ -101,7 +101,6 @@ int PadCPUKernel::Run() {
int output_size = output->DataSize();
auto output_data = reinterpret_cast<float *>(output->Data());
-// todo parallel memset to save time
memset(output_data, 0, output_size * sizeof(float));
int error_code = LiteBackendParallelLaunch(PadImpl, this, context_->thread_num_);
......
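The TODO removed above suggested parallelizing the zero-fill of the output buffer. A minimal sketch of the idea using std::thread; the function name and threading mechanism are illustrative, and the kernel itself would more likely reuse LiteBackendParallelLaunch:

```cpp
#include <algorithm>
#include <cstring>
#include <thread>
#include <vector>

// Zero-fill `element_count` floats, splitting the range across `thread_num`
// worker threads. Each worker clears a disjoint contiguous chunk.
void ParallelMemsetZero(float *data, int element_count, int thread_num) {
  std::vector<std::thread> workers;
  const int chunk = (element_count + thread_num - 1) / thread_num;  // ceil division
  for (int t = 0; t < thread_num; ++t) {
    const int begin = t * chunk;
    const int end = std::min(begin + chunk, element_count);
    if (begin >= end) break;
    workers.emplace_back(
        [data, begin, end] { memset(data + begin, 0, (end - begin) * sizeof(float)); });
  }
  for (auto &w : workers) w.join();
}
```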
......@@ -87,7 +87,6 @@ int ScatterNDCPUKernel::ReSize() {
return RET_ERROR;
}
}
-// todo check indeices out of range
// for (size_t i = 0; i < static_cast<size_t>(indice_unit_rank); i++) {}
// calculate unit_size_
......
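The removed TODO ("check indices out of range") referred to validating index tuples before scattering. A hedged sketch of such a bounds check; the parameter names mirror the surrounding code, but the helper itself is hypothetical:

```cpp
// Returns true when every index tuple addresses a valid position inside
// output_shape; an out-of-range index would otherwise write past the buffer.
bool ScatterNdIndicesInRange(const int *indices, int unit_count,
                             int indice_unit_rank, const int *output_shape) {
  for (int i = 0; i < unit_count; ++i) {
    for (int d = 0; d < indice_unit_rank; ++d) {
      const int idx = indices[i * indice_unit_rank + d];
      if (idx < 0 || idx >= output_shape[d]) {
        return false;
      }
    }
  }
  return true;
}
```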
......@@ -332,7 +332,6 @@ void ConvFp16(float16_t *input_data, float16_t *packed_input, float16_t *packed_
int out_channel = conv_param->output_channel_;
bool relu = conv_param->is_relu_;
bool relu6 = conv_param->is_relu6_;
-// todo
int thread_count = conv_param->thread_num_;
const int tile_n = 16;
int output_count = out_h * out_w;
......
......@@ -379,7 +379,6 @@ int BroadcastSub(float *input0, float *input1, float *tile_input0, float *tile_i
return ElementSub(tile_input0, tile_input1, output, element_size);
}
-// todo c=a/b,if(b==0)
int ElementDiv(float *input0, float *input1, float *output, int element_size) {
for (int i = 0; i < element_size; i++) {
if (input1[i] == 0) {
......
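The removed comment ("c=a/b, if(b==0)") described a divisor-zero guard that the hunk shows is already in place. For reference, a self-contained sketch of the same element-wise division; the error-code values are illustrative stand-ins:

```cpp
// Element-wise c = a / b with an explicit divide-by-zero check.
int ElementDivChecked(const float *input0, const float *input1, float *output,
                      int element_size) {
  for (int i = 0; i < element_size; ++i) {
    if (input1[i] == 0) {
      return -1;  // stands in for an NNACL divisor-zero error code
    }
    output[i] = input0[i] / input1[i];
  }
  return 0;  // stands in for NNACL_OK
}
```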
......@@ -423,7 +423,6 @@ void ConvDw3x3Fp32InputTrans(const float *input_data, float *trans_input, float
}
}
-// todo yangruoqi: implement assembly
void ConvDw3x3Fp32Winograd(float *trans_buffer, const float *weight, int out_h_block, int out_w_block) {
const int unit = 4;
for (int oh = 0; oh < out_h_block; oh++) {
......
......@@ -152,7 +152,7 @@ void DeConvWeightTransInt8(int8_t *src, int8_t *dst, int input_channel, int outp
}
}
} else {
-/* todo normal int8 deconv */
+/* normal int8 deconv */
}
return;
}
......@@ -171,7 +171,7 @@ void DeConvPackWeightSum(int8_t *weight, int32_t *weight_sum, int32_t input_zp,
weight_sum[c] = filter_zp * input_zp * deep16 - value * input_zp;
}
} else {
-/* todo normal int8 deconv */
+/* normal int8 deconv */
}
return;
}
......@@ -188,7 +188,7 @@ void DeConvPackInputSum(const int8_t *src, int32_t *dst, int32_t filter_zp, int
dst[r] = tmp_value * filter_zp;
}
} else {
-/* todo normal int8 deconv */
+/* normal int8 deconv */
}
return;
}
......@@ -199,7 +199,7 @@ int DeConvInt8(const int8_t *input, const int8_t *weight, int32_t *output, int32
if (matmul_func != NULL) {
matmul_func(input, weight, output, act_row, act_col, act_deep, input_sum, weight_sum);
} else {
-/* todo normal int8 deconv */
+/* normal int8 deconv */
}
return NNACL_OK;
}
......@@ -210,7 +210,7 @@ int DeConvPostInt8(const int32_t *src, const int32_t *bias, int32_t *tmp, int8_t
if (support_optimize) {
error_code = DeConvPostInt8C4(src, bias, tmp, out, output_channel, conv_param);
} else {
-/* todo normal int8 deconv post */
+/* normal int8 deconv post */
}
return error_code;
}
......@@ -377,7 +377,7 @@ int ReduceProdInt8(const int outer_size, const int inner_size, const int axis_si
if (isAddOverflow(prod, quant->in_zp_)) {
return NNACL_ERRCODE_ADD_OVERFLOW;
}
-*inner_dst = prod + quant->in_zp_; // todo overflow
+*inner_dst = prod + quant->in_zp_;
}
}
return NNACL_OK;
......
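The `// todo overflow` removed here was obsolete: the preceding `isAddOverflow` call already guards the addition. A typical implementation of such a guard looks roughly like the sketch below; the real helper's exact definition may differ:

```cpp
#include <cstdint>
#include <limits>

// True when a + b would overflow or underflow int32_t, decided without
// performing the (undefined-behavior) signed addition itself.
bool IsAddOverflow(int32_t a, int32_t b) {
  if (b > 0 && a > std::numeric_limits<int32_t>::max() - b) return true;
  if (b < 0 && a < std::numeric_limits<int32_t>::min() - b) return true;
  return false;
}
```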
......@@ -130,7 +130,6 @@ int OpenCLExecutor::TransformTensorLayoutToBuffer(tensor::Tensor *tensor, schema
tensor->SetFormat(dst_format);
return RET_OK;
} else if (dst_format == schema::Format_NHWC) {
-// TODO(wandongdong): add support !!
return RET_OK;
} else {
MS_LOG(ERROR) << "Unsupported layout transform: " << schema::EnumNameFormat(tensor->GetFormat()) << " to "
......@@ -200,7 +199,6 @@ int OpenCLExecutor::TransformTensorLayoutUint8(tensor::Tensor *tensor, schema::F
MS_ASSERT(nullptr != tensor);
MS_ASSERT(4 == tensor->shape().size());
// auto src_format = tensor->GetFormat();
-// todo
MS_LOG(ERROR) << "Unsupported layout transform: " << schema::EnumNameFormat(tensor->GetFormat()) << " to "
<< schema::EnumNameFormat(dst_format) << " in uint8";
return RET_ERROR;
......
......@@ -138,7 +138,6 @@ TEST_F(TestPack, PackWeightFp32) {
#ifdef ENABLE_FP16
TEST_F(TestPack, PackInputFp16) {
-// todo
size_t input_size;
std::string input_path = "./test_data/conv/convfp32_input_1_28_28_3.bin";
auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size));
......
......@@ -328,7 +328,6 @@ TEST_F(TestConvolutionFp16, ConvTest2) {
TEST_F(TestConvolutionFp16, Conv3x3Test1) {
auto conv_param = new ConvParameter();
InitConvParamGroup1Fp16(conv_param);
-// todo
int thread_count = 1;
int tile_num = 16;
int output_batch = conv_param->output_batch_;
......@@ -474,7 +473,6 @@ TEST_F(TestConvolutionFp16, Conv3x3Test1) {
TEST_F(TestConvolutionFp16, Conv3x3Test2) {
auto conv_param = new ConvParameter();
InitConvParamGroup2Fp16(conv_param);
-// todo
int thread_count = 1;
int tile_num = 16;
int output_batch = conv_param->output_batch_;
......
......@@ -90,7 +90,7 @@ TEST_F(TestResizeBilinearInt8, Bilinear0) {
int8_t expect[16] = {4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 5, 5, 6, 6};
Prepare(in_shape, out_shape, input_data, output_data, quant_in, quant_out, align_corners, thread_num);
-kernel_->Init(); // todo delete
+kernel_->Init();
kernel_->Run();
CompareOutputInt8(output_data, expect, 16, err_percent_);
......
......@@ -92,7 +92,7 @@ TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor0) {
err_percent_ = 0.25f;
Prepare(in_shape, out_shape, input_data, output_data, quant_in, quant_out, false, thread_num);
-kernel_->Init(); // todo delete
+kernel_->Init();
kernel_->Run();
CompareOutputInt8(output_data, expect, 16, err_percent_);
......
......@@ -348,7 +348,7 @@ STATUS BatchNormFoldFusionPass::GenNewBiasTensor() { // bias has no quant
MS_LOG(ERROR) << "new BiasTensor failed";
return RET_ERROR;
}
-newBiasTensor->dataType = 0; // todo is float
+newBiasTensor->dataType = 0;
newBiasTensor->format = Format_NUM_OF_FORMAT;
newBiasTensor->refCount = schema::NodeType_ValueNode;
newBiasTensor->dims = biasShape;
......
......@@ -29,7 +29,7 @@ void WeightFormatHardCodePass::SetFmkType(converter::FmkType fmkType) { this->fm
// pre set tensor format
// non quant, filterFormat:
//          conv      deconv    depth    dedepth
-// caffe   K(C/g)HW  C(K/g)HW  /        /        // todo with deconvOp
+// caffe   K(C/g)HW  C(K/g)HW  /        /
// tf       HWCK      HWKC      HWCK     HWKC
// onnx     K(C/g)HW  C(K/g)HW  /        /
......
......@@ -174,7 +174,6 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
break;
}
}
-// todo y00520784 : layer.input_param().shape(0)
if (layer.type() == "Input") {
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
for (int j = 0; j < layer.input_param().shape(0).dim_size(); j++) {
......
......@@ -43,7 +43,6 @@ STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::
attr->axis = axis;
// parse scale
-// todo expect only weight as scale not bias
if (weight.blobs().size() == 1) {
auto scale = ConvertWeight(weight.blobs(0));
if (scale == nullptr) {
......
......@@ -66,8 +66,7 @@ STATUS OnnxDivParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxPowParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx PowParser";
if (op != nullptr) {
-// TODO(wangzhe) attr power need populate
-std::unique_ptr<schema::PowerT> attr = std::make_unique<schema::PowerT>();
+std::unique_ptr<schema::PowerT> attr(new schema::PowerT());
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Power;
op->primitive->value.value = attr.release();
......
......@@ -65,7 +65,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
MS_LOG(ERROR) << "dilations size " << onnx_node_attr.ints().size() << " is not 2";
return RET_ERROR;
}
-// TODO(wangzhe) verify the change
attr->dilateH = static_cast<int32_t>(onnx_node_attr.ints(0));
attr->dilateW = static_cast<int32_t>(onnx_node_attr.ints(1));
} else if (onnx_node_attr.name() == "kernels") {
......@@ -80,7 +79,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
MS_LOG(ERROR) << "kernel_shape size " << onnx_node_attr.ints().size() << " is not 2";
return RET_ERROR;
}
-// TODO(wangzhe) verify the change
attr->kernelH = static_cast<int32_t>(onnx_node_attr.ints(0));
attr->kernelW = static_cast<int32_t>(onnx_node_attr.ints(1));
} else if (onnx_node_attr.name() == "auto_pad") {
......@@ -99,7 +97,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
MS_LOG(ERROR) << "strides size " << onnx_node_attr.ints().size() << " is not 2";
return RET_ERROR;
}
-// TODO(wangzhe) verify the change
attr->strideH = static_cast<int32_t>(onnx_node_attr.ints(0));
attr->strideW = static_cast<int32_t>(onnx_node_attr.ints(1));
} else if (onnx_node_attr.name() == "order") {
......@@ -143,7 +140,6 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
dims.insert(dims.begin(), iter->ints().begin(), iter->ints().end());
}
attr->channelOut = dims[0];
-// TODO(wangzhe) verify this code
attr->channelIn = dims[3] * attr->group;
}
attr->format = schema::Format_NCHW;
......
......@@ -241,7 +241,6 @@ STATUS OnnxModelParser::ParseOnnxGivenFillNode(const onnx::NodeProto &onnx_node,
std::for_each(shape.begin(), shape.end(), [&data_count](int dim) { data_count *= dim; });
size_t data_size = 0;
if (onnx_node.op_type() == "Int8GivenIntTensorFill") {
-// todo how to read onnx-ori-dataType
tensor->dataType = kNumberTypeInt32;
data_size = data_count * sizeof(int32_t) / sizeof(uint8_t);
tensor->data.resize(data_size);
......@@ -252,9 +251,7 @@ STATUS OnnxModelParser::ParseOnnxGivenFillNode(const onnx::NodeProto &onnx_node,
castedTensorData[i] = int32_t(iter->ints().data()[i]);
}
} else if (onnx_node.op_type() == "Int8GivenTensorFill") {
-// todo how to read onnx-ori-dataType
tensor->dataType = kNumberTypeUInt8;
-// todo: add * sizof(string)
data_size = data_count;
tensor->data.resize(data_size);
MS_LOG(DEBUG) << "tensor data size " << data_size << ", s: " << sizeof(iter->s().data());
......
......@@ -65,7 +65,7 @@ STATUS OnnxPReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No
if (slope_size == 1) {
attr->slope.push_back(*slope_raw_data);
attr->channelShared = true;
-} else { // TODO(wangzhe) we don't check input tensor's channel size, this may cause problem
+} else {
attr->slope.resize(slope_size);
attr->channelShared = false;
if (memcpy_s(attr->slope.data(), slope_size * sizeof(float), slope_raw_data, slope_size * sizeof(float)) != 0) {
......
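The comment removed above warned that the slope tensor's size is never validated against the input's channel count. A hedged sketch of the missing check; the helper is hypothetical, and the parser would first need the input shape, which it does not have at this point:

```cpp
// ONNX PRelu requires the slope to broadcast over the input: either one
// shared value or exactly one value per channel.
bool SlopeSizeMatchesChannels(size_t slope_size, size_t input_channels) {
  return slope_size == 1 || slope_size == input_channels;
}
```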
......@@ -26,7 +26,6 @@ STATUS OnnxReshapeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::
std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>();
attr->format = schema::Format_NCHW;
std::vector<onnx::TensorProto> params;
-// TODO(wangzhe) shape may also come from other op, there need refactor to introduce tensor_cache
for (int i = 0; i < onnx_node.input_size(); ++i) {
const auto &input_name = onnx_node.input(i);
for (const auto &it : onnx_graph.initializer()) {
......
......@@ -247,7 +247,7 @@ enum BuiltinOperator : byte {
SPACE_TO_DEPTH = 26,
SVDF = 27,
TANH = 28,
-// TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
+// Consider rename to CONCATENATE_EMBEDDINGS
CONCAT_EMBEDDINGS = 29,
SKIP_GRAM = 30,
CALL = 31,
......
......@@ -168,7 +168,6 @@ int LinearCalcer::Calc(MetaGraphT *graph, const CNodeT &node) {
if (outQuantParam->inited) {
continue;
}
-// todo copy quant params
outTensor->quantParams.front() = std::move(outQuantParam);
}
}
......
......@@ -802,7 +802,6 @@ STATUS PostTrainingQuantizer::CheckTensorVec(const std::string &nodeName,
**/
STATUS PostTrainingQuantizer::DoInference() {
for (size_t i = 0; i < calibrator_->GetBatchNum(); i++) {
-// TODO(x) when model has inputs count > 1
// get input tensor
vector<mindspore::tensor::MSTensor *> inputs = session_->GetInputs();
if (inputs.size() > 1) {
......@@ -854,7 +853,6 @@ STATUS PostTrainingQuantizer::DoInference() {
STATUS PostTrainingQuantizer::CollectDataFrequency() {
for (size_t i = 0; i < calibrator_->GetBatchNum(); i++) {
-// TODO(x) when model has inputs count > 1
// get input tensor
vector<mindspore::tensor::MSTensor *> inputs = session_->GetInputs();
if (inputs.size() > 1) {
......
......@@ -33,11 +33,11 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) {
manager->AddFuncGraph(func_graph);
std::unordered_set<AnfNodePtr> seen_node;
-std::deque<AnfNodePtr> todo{func_graph->output()};
+std::deque<AnfNodePtr> to_process{func_graph->output()};
bool changes = false;
-while (!todo.empty()) {
-AnfNodePtr node = todo.front();
-todo.pop_front();
+while (!to_process.empty()) {
+AnfNodePtr node = to_process.front();
+to_process.pop_front();
if (seen_node.count(node) > 0 || !manager->all_nodes().contains(node)) {
continue;
}
......@@ -53,15 +53,15 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) {
if (new_node && IsValueNode<FuncGraph>(new_node)) {
auto const_func_graph = GetValueNode<FuncGraphPtr>(new_node);
MS_EXCEPTION_IF_NULL(const_func_graph);
-todo.push_back(const_func_graph->output());
+to_process.push_back(const_func_graph->output());
} else if (new_node && new_node->isa<CNode>()) {
if (IsGraphKernel(new_node)) {
-todo.push_back(new_node);
+to_process.push_back(new_node);
}
auto cnode = new_node->cast<CNodePtr>();
MS_EXCEPTION_IF_NULL(cnode);
auto inputs = cnode->inputs();
-(void)todo.insert(todo.end(), inputs.begin(), inputs.end());
+(void)to_process.insert(to_process.end(), inputs.begin(), inputs.end());
}
changes = changes || change;
}
......
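The rename from `todo` to `to_process` in this pass is purely cosmetic, but it makes the role of the deque clearer: it is the worklist of a breadth-first traversal guarded by a `seen` set. The pattern in isolation, as a generic sketch whose template names are illustrative rather than the pass's actual types:

```cpp
#include <deque>
#include <unordered_set>

// Visit every node reachable from `root` exactly once, breadth-first.
// `children(node)` must return an iterable of successor nodes.
template <typename Node, typename ChildrenFn, typename VisitFn>
void WorklistTraverse(Node root, ChildrenFn children, VisitFn visit) {
  std::deque<Node> to_process{root};
  std::unordered_set<Node> seen;
  while (!to_process.empty()) {
    Node node = to_process.front();
    to_process.pop_front();
    if (!seen.insert(node).second) continue;  // already processed
    visit(node);
    for (const auto &child : children(node)) to_process.push_back(child);
  }
}
```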