未验证 提交 2ecc4150 编写于 作者: B BUG1989 提交者: GitHub

add op test, reciprocal, softplus (#666)

* add op test, reciprocal, softplus

* fix, wider type

* fix: remove unused arguments from tm2_softplus

* add optest, abs,acos,asin,atan,ceil,cos,log,pow,selu,sub
上级 a8249eb1
......@@ -474,7 +474,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -64,7 +64,6 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -73,7 +73,6 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -73,7 +73,6 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -76,7 +76,6 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -84,7 +84,6 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -94,7 +94,6 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -74,7 +74,6 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -84,7 +84,6 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -67,7 +67,6 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -231,7 +231,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -141,7 +141,6 @@ int main(int argc, char* argv[])
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -169,7 +169,6 @@ int main(int argc, char* argv[])
if (graph == NULL)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -182,7 +182,6 @@ int main(int argc, char* argv[])
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -308,7 +308,6 @@ int main(int argc, char* argv[])
if (graph == NULL)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -101,7 +101,6 @@ int tengine_segment(const char* model_file, const char* image_file, int img_h, i
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -245,7 +245,6 @@ static int detect_yolact(const cv::Mat& bgr, std::vector<Object>& objects, const
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -565,7 +565,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -714,7 +714,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -722,7 +722,6 @@ int main(int argc, char *argv[])
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -715,7 +715,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -729,7 +729,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -437,7 +437,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -375,7 +375,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -820,7 +820,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -345,7 +345,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -426,7 +426,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -439,7 +439,6 @@ int main(int argc, char* argv[])
if (graph == nullptr)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -61,14 +61,12 @@ static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct ex
struct tensor* input_tensor = get_ir_graph_tensor(graph, node->input_tensors[0]);
struct tensor* input_tensor1 = get_ir_graph_tensor(graph, node->input_tensors[1]);
struct tensor* output_tensor = get_ir_graph_tensor(graph, node->output_tensors[0]);
struct comparison_param* param = ( struct comparison_param* )node->op.param_mem;
void* input0 = input_tensor->data;
void* input1 = input_tensor1->data;
void* output = output_tensor->data;
_comparison_param op_param;
......
......@@ -211,15 +211,15 @@ static int ref_eltwise_fp32(void* output, void* input0, void* input1, int type,
case ELT_POW:
if(input_count4 == 1){
for(int i = 0; i < input1_count4; i++){
*out_ptr++ = pow(in0[0], in1[i]);
*out_ptr++ = powf(in0[0], in1[i]);
}
} else if (input1_count4 == 1){
for(int i = 0; i < input1_count4; i++){
*out_ptr++ = pow(in0[0], in1[i]);
*out_ptr++ = powf(in0[0], in1[i]);
}
} else if (input_count4 == input1_count4){
for(int i = 0; i < input_count4; i++){
*out_ptr++ = pow(in0[i], in1[i]);
*out_ptr++ = powf(in0[i], in1[i]);
}
} else {
TLOG_ERR("Case not support \n");
......@@ -227,7 +227,7 @@ static int ref_eltwise_fp32(void* output, void* input0, void* input1, int type,
break;
case ELT_POWER:
for(int i = 0; i < input_count4; i++){
*out_ptr++ = pow((eltwise_param->shift + eltwise_param->scale * in0[i]), eltwise_param->power);
*out_ptr++ = powf((eltwise_param->shift + eltwise_param->scale * in0[i]), eltwise_param->power);
}
break;
case ELT_LOG:
......
......@@ -148,11 +148,9 @@ static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct ex
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
struct tensor* output_tensor;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct selu_param* selu_param = ( struct selu_param* )ir_node->op.param_mem;
int num_thread = exec_graph->num_thread;
......
......@@ -44,7 +44,6 @@ static int ref_unary_fp32(struct tensor* input_tensor, struct tensor* output_ten
float* out_data = output_tensor->data;
int size = input_tensor->elem_num;
int type = param->type;
switch (type)
......@@ -322,11 +321,9 @@ static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct ex
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
struct tensor* output_tensor;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct unary_param* unary_param = ( struct unary_param* )ir_node->op.param_mem;
int ret = -1;
......
......@@ -32,6 +32,18 @@
#include "utility/sys_port.h"
static int infer_shape(ir_node_t* node)
{
ir_graph_t* ir_graph = node->graph;
ir_tensor_t* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
ir_tensor_t* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);
set_ir_tensor_shape(output, input->dims, input->dim_num);
return 0;
}
static int init_op(struct op* op)
{
struct selu_param* selu_param = ( struct selu_param* )sys_malloc(sizeof(struct selu_param));
......@@ -42,8 +54,8 @@ static int init_op(struct op* op)
}
/*set the param default value */
selu_param->alpha = 1.f;
selu_param->lambda = 1.f;
selu_param->alpha = 1.67326319f;
selu_param->lambda = 1.05070102f;
op->param_mem = selu_param;
op->param_size = sizeof(struct selu_param);
......
......@@ -33,7 +33,7 @@
int is_index_in_array(const uint16_t* array, const uint16_t array_size, const uint16_t index)
{
for (uint8_t i = 0; i < array_size; i++)
for (uint16_t i = 0; i < array_size; i++)
{
const uint16_t selected_index = array[i];
......
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "graph/subgraph.h"
#include "module/module.h"
#include "serializer/serializer.h"
#include "tmfile/tm2_serializer.h"
#include "device/device.h"
#include "utility/log.h"
/* Map the tmfile operator id onto Tengine's internal operator type.
 * Reciprocal is a one-to-one mapping, so the incoming id is ignored. */
static int reciprocal_op_map(int op)
{
    (void)op; /* unused: single fixed mapping */
    return OP_RECIPROCAL;
}
/* Loader hook for the Reciprocal operator.  The op carries no parameters in
 * the tmfile, so there is nothing to deserialize — the hook only needs to
 * exist so the serializer can register it.  Always returns 0 (success). */
static int tm2_load_reciprocal(struct graph* ir_graph, struct node* ir_node, const TM2_Node* tm_node,
                               const TM2_Operator* tm_op)
{
    return 0;
}
/* Register the Reciprocal loader with the tmfile ("tengine") serializer.
 * Returns 0 on success, -1 when the serializer is not registered yet. */
int register_tm2_reciprocal_op()
{
    struct serializer* serializer = find_serializer_via_name("tengine");

    if (NULL == serializer)
    {
        TLOG_ERR("tengine serializer has not been registered yet\n");
        return -1;
    }

    serializer->register_op_loader(serializer, TM2_OPTYPE_RECIPROCAL, 1, tm2_load_reciprocal, reciprocal_op_map, NULL);
    return 0;
}
int unregister_tm2_reciprocal_op()
{
struct serializer* tm2_s = find_serializer_via_name("tengine");
tm2_s->unregister_op_loader(tm2_s, TM2_OPTYPE_RECIPROCAL, 1, tm2_load_reciprocal);
return 0;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: bhu@openailab.com
*/
#include "selu_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "graph/subgraph.h"
#include "module/module.h"
#include "serializer/serializer.h"
#include "tmfile/tm2_serializer.h"
#include "device/device.h"
#include "utility/log.h"
/* Map the tmfile operator id onto Tengine's internal operator type.
 * Selu is a one-to-one mapping, so the incoming id is ignored. */
static int selu_op_map(int op)
{
    (void)op; /* unused: single fixed mapping */
    return OP_SELU;
}
static int tm2_load_selu(struct graph* ir_graph, struct node* ir_node, const TM2_Node* tm_node,
const TM2_Operator* tm_op)
{
struct selu_param* selu_param = ( struct selu_param* )ir_node->op.param_mem;
const struct tm2_priv* tm2_priv = (struct tm2_priv*)ir_graph->serializer_privacy;
const char* mem_base = tm2_priv->base;
const TM2_SeluParam* tm_param = ( TM2_SeluParam* )(mem_base + tm_op->offset_t_param);
selu_param->alpha = tm_param->alpha;
selu_param->lambda = tm_param->lambda;
return 0;
}
/* Register the Selu loader with the tmfile ("tengine") serializer.
 * Returns 0 on success, -1 when the serializer is not registered yet. */
int register_tm2_selu_op()
{
    struct serializer* serializer = find_serializer_via_name("tengine");

    if (NULL == serializer)
    {
        TLOG_ERR("tengine serializer has not been registered yet\n");
        return -1;
    }

    serializer->register_op_loader(serializer, TM2_OPTYPE_SELU, 1, tm2_load_selu, selu_op_map, NULL);
    return 0;
}
int unregister_tm2_selu_op()
{
struct serializer* tm2_s = find_serializer_via_name("tengine");
tm2_s->unregister_op_loader(tm2_s, TM2_OPTYPE_SELU, 1, tm2_load_selu);
return 0;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "graph/subgraph.h"
#include "module/module.h"
#include "serializer/serializer.h"
#include "tmfile/tm2_serializer.h"
#include "device/device.h"
#include "utility/log.h"
/* Map the tmfile operator id onto Tengine's internal operator type.
 * Softplus is a one-to-one mapping, so the incoming id is ignored. */
static int softplus_op_map(int op)
{
    (void)op; /* unused: single fixed mapping */
    return OP_SOFTPLUS;
}
/* Loader hook for the Softplus operator.  The op carries no parameters in
 * the tmfile, so there is nothing to deserialize — the hook only needs to
 * exist so the serializer can register it.  Always returns 0 (success). */
static int tm2_load_softplus(struct graph* ir_graph, struct node* ir_node, const TM2_Node* tm_node,
                             const TM2_Operator* tm_op)
{
    return 0;
}
/* Register the Softplus loader with the tmfile ("tengine") serializer.
 * Returns 0 on success, -1 when the serializer is not registered yet. */
int register_tm2_softplus_op()
{
    struct serializer* serializer = find_serializer_via_name("tengine");

    if (NULL == serializer)
    {
        TLOG_ERR("tengine serializer has not been registered yet\n");
        return -1;
    }

    serializer->register_op_loader(serializer, TM2_OPTYPE_SOFTPLUS, 1, tm2_load_softplus, softplus_op_map, NULL);
    return 0;
}
int unregister_tm2_softplus_op()
{
struct serializer* tm2_s = find_serializer_via_name("tengine");
tm2_s->unregister_op_loader(tm2_s, TM2_OPTYPE_SOFTPLUS, 1, tm2_load_softplus);
return 0;
}
......@@ -146,7 +146,8 @@ typedef uint8_t tm_bool_t; /* bool is 1-byte unsigned integ
#define TM2_OPSTR_SCATTER "Scatter"
#define TM2_OPSTR_TILE "Tile"
#define TM2_OPSTR_L2POOL "L2Pool"
#define TM2_OPSTR_SOFTPLUS "Softplus"
#define TM2_OPSTR_RECIPROCAL "Reciprocal"
/* Operator types */
#define TM2_OPTYPE_ACCURACY 0 /* No Param */
#define TM2_OPTYPE_BATCHNORMALIZATION 1 /* TM2_BatchNormParam */
......@@ -250,7 +251,9 @@ typedef uint8_t tm_bool_t; /* bool is 1-byte unsigned integ
#define TM2_OPTYPE_LOGSOFTMAX 99
#define TM2_OPTYPE_RELU1 100
#define TM2_OPTYPE_L2NORMALIZATION 101
#define TM2_OPTYPE_NUM 102
#define TM2_OPTYPE_SOFTPLUS 102
#define TM2_OPTYPE_RECIPROCAL 103
#define TM2_OPTYPE_NUM 104
/* --------------------- -------- TM objects -------------------------------- */
......
......@@ -75,11 +75,16 @@ if(PROTOBUF_FOUND)
add_test (${name} ${name})
endfunction()
tengine_onnx_op_test(test_onnx_op_abs op/test_onnx_op_abs.cpp)
tengine_onnx_op_test(test_onnx_op_acos op/test_onnx_op_acos.cpp)
tengine_onnx_op_test(test_onnx_op_add op/test_onnx_op_add.cpp)
tengine_onnx_op_test(test_onnx_op_asin op/test_onnx_op_asin.cpp)
tengine_onnx_op_test(test_onnx_op_atan op/test_onnx_op_atan.cpp)
tengine_onnx_op_test(test_onnx_op_averagepool_2d_default op/test_onnx_op_averagepool_2d_default.cpp)
tengine_onnx_op_test(test_onnx_op_averagepool_2d_pads op/test_onnx_op_averagepool_2d_pads.cpp)
tengine_onnx_op_test(test_onnx_op_basic_conv_with_padding op/test_onnx_op_basic_conv_with_padding.cpp)
tengine_onnx_op_test(test_onnx_op_basic_conv_without_padding op/test_onnx_op_basic_conv_without_padding.cpp)
tengine_onnx_op_test(test_onnx_op_ceil op/test_onnx_op_ceil.cpp)
# tengine_onnx_op_test(test_onnx_op_concat_1d_axis_0 op/test_onnx_op_concat_1d_axis_0.cpp)
tengine_onnx_op_test(test_onnx_op_concat_2d_axis_0 op/test_onnx_op_concat_2d_axis_0.cpp)
tengine_onnx_op_test(test_onnx_op_concat_2d_axis_1 op/test_onnx_op_concat_2d_axis_1.cpp)
......@@ -88,6 +93,7 @@ if(PROTOBUF_FOUND)
tengine_onnx_op_test(test_onnx_op_concat_3d_axis_2 op/test_onnx_op_concat_3d_axis_2.cpp)
tengine_onnx_op_test(test_onnx_op_conv_with_strides_no_padding op/test_onnx_op_conv_with_strides_no_padding.cpp)
tengine_onnx_op_test(test_onnx_op_conv_with_strides_padding op/test_onnx_op_conv_with_strides_padding.cpp)
tengine_onnx_op_test(test_onnx_op_cos op/test_onnx_op_cos.cpp)
tengine_onnx_op_test(test_onnx_op_div op/test_onnx_op_div.cpp)
tengine_onnx_op_test(test_onnx_op_dropout_default op/test_onnx_op_dropout_default.cpp)
tengine_onnx_op_test(test_onnx_op_elu op/test_onnx_op_elu.cpp)
......@@ -98,14 +104,22 @@ if(PROTOBUF_FOUND)
# tengine_onnx_op_test(test_onnx_op_gru_seq_length op/test_onnx_op_gru_seq_length.cpp)
tengine_onnx_op_test(test_onnx_op_gru_with_initial_bias op/test_onnx_op_gru_with_initial_bias.cpp)
tengine_onnx_op_test(test_onnx_op_leakyrelu op/test_onnx_op_leakyrelu.cpp)
tengine_onnx_op_test(test_onnx_op_log op/test_onnx_op_log.cpp)
# tengine_onnx_op_test(test_onnx_op_logsoftmax_default_axis op/test_onnx_op_logsoftmax_default_axis.cpp)
tengine_onnx_op_test(test_onnx_op_lstm_defaults op/test_onnx_op_lstm_defaults.cpp)
tengine_onnx_op_test(test_onnx_op_lstm_with_initial_bias op/test_onnx_op_lstm_with_initial_bias.cpp)
tengine_onnx_op_test(test_onnx_op_maxpool_2d_default op/test_onnx_op_maxpool_2d_default.cpp)
# tengine_onnx_op_test(test_onnx_op_maxpool_2d_dilations op/test_onnx_op_maxpool_2d_dilations.cpp)
tengine_onnx_op_test(test_onnx_op_maxpool_2d_pads op/test_onnx_op_maxpool_2d_pads.cpp)
tengine_onnx_op_test(test_onnx_op_pow op/test_onnx_op_pow.cpp)
tengine_onnx_op_test(test_onnx_op_reciprocal op/test_onnx_op_reciprocal.cpp)
tengine_onnx_op_test(test_onnx_op_relu op/test_onnx_op_relu.cpp)
tengine_onnx_op_test(test_onnx_op_selu op/test_onnx_op_selu.cpp)
tengine_onnx_op_test(test_onnx_op_selu_default op/test_onnx_op_selu_default.cpp)
# tengine_onnx_op_test(test_onnx_op_softmax_default_axis op/test_onnx_op_softmax_default_axis.cpp)
tengine_onnx_op_test(test_onnx_op_softplus op/test_onnx_op_softplus.cpp)
tengine_onnx_op_test(test_onnx_op_squeeze op/test_onnx_op_squeeze.cpp)
tengine_onnx_op_test(test_onnx_op_sub op/test_onnx_op_sub.cpp)
tengine_onnx_op_test(test_onnx_op_tanh op/test_onnx_op_tanh.cpp)
tengine_onnx_op_test(test_onnx_op_unsqueeze_axis_1 op/test_onnx_op_unsqueeze_axis_1.cpp)
else()
......
......@@ -79,7 +79,6 @@ static int detect_yolact(const char* model_file, int repeat_count,
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
......@@ -243,7 +243,6 @@ printf("img:%d %d\n", img_w, img_h);
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
fprintf(stderr, "errno: %d \n", get_tengine_errno());
return -1;
}
......
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
// Name of the ONNX node test case; selects the data directory under onnx_node/.
std::string node = "test_abs";
// Reference input/output tensors exported by the ONNX test suite.
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
// The ONNX model converted to Tengine's tmfile format.
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/* Single-operator regression test: feed the reference input through the
 * converted tmfile model and compare against the ONNX reference output.
 * Returns 0 when every output element matches (per float_mismatch). */
int main(int argc, char* argv[])
{
    (void)argc; /* the test takes no command-line arguments */
    (void)argv;

    /* input tensor geometry expected by the single-node test model */
    const int n = 1;
    const int c = 3;
    const int h = 4;
    const int w = 5;

    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;

    /* initialize tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }

    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }

    /* set the shape, data buffer of input_tensor of the graph */
    const int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);

    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }

    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }

    /* buffer size in bytes: sizeof(float) instead of a magic '4' */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * static_cast<int>(sizeof(float))) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }

    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }

    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);

    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }

    /* get the current result of inference; guard against a missing output */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = (float*)get_tensor_buffer(output_tensor);
    const int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);

    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);

    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);

    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();

    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
// Name of the ONNX node test case; selects the data directory under onnx_node/.
std::string node = "test_acos";
// Reference input/output tensors exported by the ONNX test suite.
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
// The ONNX model converted to Tengine's tmfile format.
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/* Single-operator regression test: feed the reference input through the
 * converted tmfile model and compare against the ONNX reference output.
 * Returns 0 when every output element matches (per float_mismatch). */
int main(int argc, char* argv[])
{
    (void)argc; /* the test takes no command-line arguments */
    (void)argv;

    /* input tensor geometry expected by the single-node test model */
    const int n = 1;
    const int c = 3;
    const int h = 4;
    const int w = 5;

    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;

    /* initialize tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }

    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }

    /* set the shape, data buffer of input_tensor of the graph */
    const int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);

    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }

    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }

    /* buffer size in bytes: sizeof(float) instead of a magic '4' */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * static_cast<int>(sizeof(float))) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }

    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }

    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);

    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }

    /* get the current result of inference; guard against a missing output */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = (float*)get_tensor_buffer(output_tensor);
    const int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);

    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);

    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);

    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();

    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
// Name of the ONNX node test case; selects the data directory under onnx_node/.
std::string node = "test_asin";
// Reference input/output tensors exported by the ONNX test suite.
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
// The ONNX model converted to Tengine's tmfile format.
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/* Single-operator regression test: feed the reference input through the
 * converted tmfile model and compare against the ONNX reference output.
 * Returns 0 when every output element matches (per float_mismatch). */
int main(int argc, char* argv[])
{
    (void)argc; /* the test takes no command-line arguments */
    (void)argv;

    /* input tensor geometry expected by the single-node test model */
    const int n = 1;
    const int c = 3;
    const int h = 4;
    const int w = 5;

    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;

    /* initialize tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }

    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }

    /* set the shape, data buffer of input_tensor of the graph */
    const int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);

    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }

    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }

    /* buffer size in bytes: sizeof(float) instead of a magic '4' */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * static_cast<int>(sizeof(float))) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }

    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }

    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);

    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }

    /* get the current result of inference; guard against a missing output */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = (float*)get_tensor_buffer(output_tensor);
    const int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);

    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);

    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);

    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();

    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
// Name of the ONNX node test case; selects the data directory under onnx_node/.
std::string node = "test_atan";
// Reference input/output tensors exported by the ONNX test suite.
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
// The ONNX model converted to Tengine's tmfile format.
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/* Single-operator regression test: feed the reference input through the
 * converted tmfile model and compare against the ONNX reference output.
 * Returns 0 when every output element matches (per float_mismatch). */
int main(int argc, char* argv[])
{
    (void)argc; /* the test takes no command-line arguments */
    (void)argv;

    /* input tensor geometry expected by the single-node test model */
    const int n = 1;
    const int c = 3;
    const int h = 4;
    const int w = 5;

    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;

    /* initialize tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }

    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }

    /* set the shape, data buffer of input_tensor of the graph */
    const int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);

    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }

    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }

    /* buffer size in bytes: sizeof(float) instead of a magic '4' */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * static_cast<int>(sizeof(float))) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }

    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }

    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);

    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }

    /* get the current result of inference; guard against a missing output */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = (float*)get_tensor_buffer(output_tensor);
    const int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);

    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);

    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);

    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();

    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
std::string node = "test_ceil";
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/*
 * Regression test for the ONNX Ceil operator: run the converted tmfile
 * through Tengine and compare the inference result against the reference
 * output pb. Returns 0 when every output value matches, non-zero otherwise.
 */
int main(int argc, char* argv[])
{
    /* fixed NCHW shape of the reference test data */
    int n = 1;
    int c = 3;
    int h = 4;
    int w = 5;
    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;
    /* inital tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }
    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }
    /* set the shape, data buffer of input_tensor of the graph */
    int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);
    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    /* buffer size in bytes; use sizeof(float) instead of a magic 4 */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }
    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);
    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }
    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    if (output_data == nullptr)
    {
        fprintf(stderr, "Get output tensor buffer failed\n");
        return -1;
    }
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);
    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);
    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();
    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
std::string node = "test_cos";
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/*
 * Regression test for the ONNX Cos operator: run the converted tmfile
 * through Tengine and compare the inference result against the reference
 * output pb. Returns 0 when every output value matches, non-zero otherwise.
 */
int main(int argc, char* argv[])
{
    /* fixed NCHW shape of the reference test data */
    int n = 1;
    int c = 3;
    int h = 4;
    int w = 5;
    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;
    /* inital tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }
    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }
    /* set the shape, data buffer of input_tensor of the graph */
    int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);
    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    /* buffer size in bytes; use sizeof(float) instead of a magic 4 */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }
    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);
    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }
    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    if (output_data == nullptr)
    {
        fprintf(stderr, "Get output tensor buffer failed\n");
        return -1;
    }
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);
    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);
    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();
    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
std::string node = "test_log";
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/*
 * Regression test for the ONNX Log operator: run the converted tmfile
 * through Tengine and compare the inference result against the reference
 * output pb. Returns 0 when every output value matches, non-zero otherwise.
 */
int main(int argc, char* argv[])
{
    /* fixed NCHW shape of the reference test data */
    int n = 1;
    int c = 3;
    int h = 4;
    int w = 5;
    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;
    /* inital tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }
    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }
    /* set the shape, data buffer of input_tensor of the graph */
    int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);
    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    /* buffer size in bytes; use sizeof(float) instead of a magic 4 */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }
    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);
    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }
    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    if (output_data == nullptr)
    {
        fprintf(stderr, "Get output tensor buffer failed\n");
        return -1;
    }
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);
    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);
    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();
    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
std::string node = "test_logsoftmax_default_axis";
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/*
 * Regression test for the ONNX LogSoftmax operator (default axis): run the
 * converted tmfile through Tengine and compare the inference result against
 * the reference output pb. Returns 0 when every output value matches,
 * non-zero otherwise.
 */
int main(int argc, char* argv[])
{
    /* fixed NCHW shape of the reference test data */
    int n = 1;
    int c = 3;
    int h = 4;
    int w = 5;
    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;
    /* inital tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }
    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }
    /* set the shape, data buffer of input_tensor of the graph */
    int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);
    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    /* buffer size in bytes; use sizeof(float) instead of a magic 4 */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }
    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);
    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }
    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    if (output_data == nullptr)
    {
        fprintf(stderr, "Get output tensor buffer failed\n");
        return -1;
    }
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);
    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);
    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();
    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
std::string node = "test_pow";
std::string input_pb_0 = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string input_pb_1 = "../onnx_node/" + node + "/test_data_set_0/input_1.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/*
 * Regression test for the ONNX Pow operator (two inputs): run the converted
 * tmfile through Tengine and compare the inference result against the
 * reference output pb. Returns 0 when every output value matches, non-zero
 * otherwise.
 */
int main(int argc, char* argv[])
{
    /* fixed NCHW shapes of the two reference input tensors */
    int n_0 = 1;
    int c_0 = 3;
    int h_0 = 4;
    int w_0 = 5;
    int n_1 = 1;
    int c_1 = 3;
    int h_1 = 4;
    int w_1 = 5;
    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;
    /* inital tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }
    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }
    /* set the shape, data buffer of input_tensor of the graph */
    /* input 0 */
    int input_size_0 = n_0 * c_0 * h_0 * w_0;
    int dims_0[] = {n_0, c_0, h_0, w_0};
    std::vector<float> feature_in_0(input_size_0);
    tensor_t input_tensor_0 = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor_0 == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor_0, dims_0, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    /* buffer size in bytes; use sizeof(float) instead of a magic 4 */
    if (set_tensor_buffer(input_tensor_0, feature_in_0.data(), input_size_0 * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* input 1 */
    int input_size_1 = n_1 * c_1 * h_1 * w_1;
    int dims_1[] = {n_1, c_1, h_1, w_1};
    std::vector<float> feature_in_1(input_size_1);
    tensor_t input_tensor_1 = get_graph_input_tensor(graph, 1, 0);
    if (input_tensor_1 == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor_1, dims_1, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    if (set_tensor_buffer(input_tensor_1, feature_in_1.data(), input_size_1 * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }
    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in_0.data(), input_pb_0);
    get_pb_data(feature_in_1.data(), input_pb_1);
    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }
    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    if (output_data == nullptr)
    {
        fprintf(stderr, "Get output tensor buffer failed\n");
        return -1;
    }
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);
    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);
    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();
    /* BUG FIX: the original returned 0 unconditionally, so a mismatching
     * result could never fail the test; propagate the comparison result. */
    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
std::string node = "test_reciprocal";
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/*
 * Regression test for the ONNX Reciprocal operator: run the converted tmfile
 * through Tengine and compare the inference result against the reference
 * output pb. Returns 0 when every output value matches, non-zero otherwise.
 */
int main(int argc, char* argv[])
{
    /* fixed NCHW shape of the reference test data */
    int n = 1;
    int c = 3;
    int h = 4;
    int w = 5;
    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;
    /* inital tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }
    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }
    /* set the shape, data buffer of input_tensor of the graph */
    int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);
    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    /* buffer size in bytes; use sizeof(float) instead of a magic 4 */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }
    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);
    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }
    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    if (output_data == nullptr)
    {
        fprintf(stderr, "Get output tensor buffer failed\n");
        return -1;
    }
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);
    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);
    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();
    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
std::string node = "test_selu";
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/*
 * Regression test for the ONNX Selu operator: run the converted tmfile
 * through Tengine and compare the inference result against the reference
 * output pb. Returns 0 when every output value matches, non-zero otherwise.
 */
int main(int argc, char* argv[])
{
    /* fixed NCHW shape of the reference test data */
    int n = 1;
    int c = 3;
    int h = 4;
    int w = 5;
    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;
    /* inital tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }
    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }
    /* set the shape, data buffer of input_tensor of the graph */
    int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);
    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    /* buffer size in bytes; use sizeof(float) instead of a magic 4 */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }
    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);
    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }
    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    if (output_data == nullptr)
    {
        fprintf(stderr, "Get output tensor buffer failed\n");
        return -1;
    }
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);
    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);
    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();
    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
std::string node = "test_selu_default";
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/*
 * Regression test for the ONNX Selu operator with default attributes: run
 * the converted tmfile through Tengine and compare the inference result
 * against the reference output pb. Returns 0 when every output value
 * matches, non-zero otherwise.
 */
int main(int argc, char* argv[])
{
    /* fixed NCHW shape of the reference test data */
    int n = 1;
    int c = 3;
    int h = 4;
    int w = 5;
    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;
    /* inital tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }
    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }
    /* set the shape, data buffer of input_tensor of the graph */
    int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);
    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    /* buffer size in bytes; use sizeof(float) instead of a magic 4 */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }
    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);
    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }
    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    if (output_data == nullptr)
    {
        fprintf(stderr, "Get output tensor buffer failed\n");
        return -1;
    }
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);
    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);
    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();
    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
std::string node = "test_softplus";
std::string input_pb = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
/*
 * Regression test for the ONNX Softplus operator: run the converted tmfile
 * through Tengine and compare the inference result against the reference
 * output pb. Returns 0 when every output value matches, non-zero otherwise.
 */
int main(int argc, char* argv[])
{
    /* fixed NCHW shape of the reference test data */
    int n = 1;
    int c = 3;
    int h = 4;
    int w = 5;
    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;
    /* inital tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }
    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }
    /* set the shape, data buffer of input_tensor of the graph */
    int input_size = n * c * h * w;
    int dims[] = {n, c, h, w};
    std::vector<float> feature_in(input_size);
    tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }
    if (set_tensor_shape(input_tensor, dims, 4) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }
    /* buffer size in bytes; use sizeof(float) instead of a magic 4 */
    if (set_tensor_buffer(input_tensor, feature_in.data(), input_size * sizeof(float)) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }
    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }
    /* prepare process input data, set the data mem to input tensor */
    get_pb_data(feature_in.data(), input_pb);
    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }
    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    if (output_tensor == nullptr)
    {
        fprintf(stderr, "Get output tensor failed\n");
        return -1;
    }
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    if (output_data == nullptr)
    {
        fprintf(stderr, "Get output tensor buffer failed\n");
        return -1;
    }
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);
    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);
    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();
    return ret;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "test_onnx_op.h"
// Name of the ONNX node under test; selects the test-data directory.
std::string node = "test_sub";
// Reference input/output blobs (protobuf) from the ONNX node's first test data set.
std::string input_pb_0 = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string input_pb_1 = "../onnx_node/" + node + "/test_data_set_0/input_1.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
// Tengine model file converted from the ONNX node model.
std::string model = "../onnx_node/" + node + "/onnx.tmfile";
int main(int argc, char* argv[])
{
int n_0 = 1;
int c_0 = 3;
int h_0 = 4;
int w_0 = 5;
int n_1 = 1;
int c_1 = 3;
int h_1 = 4;
int w_1 = 5;
/* set runtime options */
struct options opt;
opt.num_thread = 1;
opt.cluster = TENGINE_CLUSTER_ALL;
opt.precision = TENGINE_MODE_FP32;
opt.affinity = 0;
/* inital tengine */
if (init_tengine() != 0)
{
fprintf(stderr, "Initial tengine failed.\n");
return -1;
}
/* create graph, load tengine model xxx.tmfile */
graph_t graph = create_graph(nullptr, "tengine", model.c_str());
if (nullptr == graph)
{
fprintf(stderr, "Create graph failed.\n");
return -1;
}
/* set the shape, data buffer of input_tensor of the graph */
/* input 0 */
int input_size_0 = n_0 * c_0 * h_0 * w_0;
int dims_0[] = {n_0, c_0, h_0, w_0};
std::vector<float> feature_in_0(input_size_0);
tensor_t input_tensor_0 = get_graph_input_tensor(graph, 0, 0);
if (input_tensor_0 == nullptr)
{
fprintf(stderr, "Get input tensor failed\n");
return -1;
}
if (set_tensor_shape(input_tensor_0, dims_0, 4) < 0)
{
fprintf(stderr, "Set input tensor shape failed\n");
return -1;
}
if (set_tensor_buffer(input_tensor_0, feature_in_0.data(), input_size_0 * 4) < 0)
{
fprintf(stderr, "Set input tensor buffer failed\n");
return -1;
}
/* input 1 */
int input_size_1 = n_1 * c_1 * h_1 * w_1;
int dims_1[] = {n_1, c_1, h_1, w_1};
std::vector<float> feature_in_1(input_size_1);
tensor_t input_tensor_1 = get_graph_input_tensor(graph, 1, 0);
if (input_tensor_1 == nullptr)
{
fprintf(stderr, "Get input tensor failed\n");
return -1;
}
if (set_tensor_shape(input_tensor_1, dims_1, 4) < 0)
{
fprintf(stderr, "Set input tensor shape failed\n");
return -1;
}
if (set_tensor_buffer(input_tensor_1, feature_in_1.data(), input_size_1 * 4) < 0)
{
fprintf(stderr, "Set input tensor buffer failed\n");
return -1;
}
/* prerun graph, set work options(num_thread, cluster, precision) */
if (prerun_graph_multithread(graph, opt) < 0)
{
fprintf(stderr, "Prerun multithread graph failed.\n");
return -1;
}
/* prepare process input data, set the data mem to input tensor */
get_pb_data(feature_in_0.data(), input_pb_0);
get_pb_data(feature_in_1.data(), input_pb_1);
/* run graph */
if (run_graph(graph, 1) < 0)
{
fprintf(stderr, "Run graph failed\n");
return -1;
}
/* get the current result of inference */
tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
float* output_data = ( float* )get_tensor_buffer(output_tensor);
int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);
/* get the reference result of inference */
std::vector<float> reference_out(output_size);
get_pb_data(reference_out.data(), output_pb);
/* check the result */
int ret = float_mismatch(output_data, reference_out.data(), output_size);
/* release tengine */
postrun_graph(graph);
destroy_graph(graph);
release_tengine();
return ret;
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册