Unverified commit c89ddb8c, authored by BUG1989, committed by GitHub

Fix clip (#697)

* Fix slice and clip implementations, add op test, support clip with min and max input tensors
Parent 912e3d7a
@@ -453,7 +453,7 @@ static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct ex
     if (input_tensor->dims[0] == out_tensor->dims[0] && input_tensor->dims[1] == out_tensor->dims[1] &&
         input_tensor->dims[2] == out_tensor->dims[2] && input_tensor->dims[3] == out_tensor->dims[3])
     {
-        memcpy(( void* )(out_data_ptrs[0]), ( void* )input, mem_size);
+        memcpy(( void* )(out_data_ptrs[0]), ( void* )input, mem_size * input_tensor->elem_num);
         sys_free(out_data_ptrs);
         return true;
     }
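The one-line change above scales the copy by the tensor's element count; read together with the new argument, mem_size evidently holds the per-element byte size here, so the old call copied only part of the buffer on this shape-match fast path. A minimal standalone sketch of the corrected size computation (helper name hypothetical):

#include <string.h>

/* sketch: total bytes = per-element size x element count, not just one element */
static void copy_tensor_data(void* dst, const void* src, size_t elem_size, size_t elem_num)
{
    memcpy(dst, src, elem_size * elem_num);
}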
@@ -39,6 +39,18 @@ static int infer_shape(struct node* node)
     struct tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
     struct tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);
 
+    if (node->input_num == 3)
+    {
+        struct tensor* clip_min = get_ir_graph_tensor(ir_graph, node->input_tensors[1]);
+        struct tensor* clip_max = get_ir_graph_tensor(ir_graph, node->input_tensors[2]);
+        struct clip_param* clip_param = ( struct clip_param* )node->op.param_mem;
+
+        float* min = (float *)clip_min->data;
+        float* max = (float *)clip_max->data;
+        clip_param->min = min[0];
+        clip_param->max = max[0];
+    }
+
     set_ir_tensor_shape(output, input->dims, input->dim_num);
 
     return 0;
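For context on the hunk above: since ONNX opset 11, Clip takes min and max as optional input tensors rather than attributes, so when a node arrives with three inputs the shape-inference step copies the two scalars into clip_param and the kernel can keep its attribute-driven path. A minimal reference of the resulting clamp, assuming FP32 data and scalar bounds (helper name hypothetical):

#include <math.h>

/* reference clip: clamp every element to [min_v, max_v] */
static void ref_clip_fp32(const float* in, float* out, int n, float min_v, float max_v)
{
    for (int i = 0; i < n; i++)
        out[i] = fminf(fmaxf(in[i], min_v), max_v);
}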
@@ -108,6 +108,7 @@ if(PROTOBUF_FOUND)
     tengine_onnx_op_test(test_onnx_op_basic_conv_with_padding op/test_onnx_op_basic_conv_with_padding.cpp)
     tengine_onnx_op_test(test_onnx_op_basic_conv_without_padding op/test_onnx_op_basic_conv_without_padding.cpp)
     tengine_onnx_op_test(test_onnx_op_ceil op/test_onnx_op_ceil.cpp)
+    tengine_onnx_op_test(test_onnx_op_clip_example op/test_onnx_op_clip_example.cpp)
     # tengine_onnx_op_test(test_onnx_op_concat_1d_axis_0 op/test_onnx_op_concat_1d_axis_0.cpp)
     tengine_onnx_op_test(test_onnx_op_concat_2d_axis_0 op/test_onnx_op_concat_2d_axis_0.cpp)
     tengine_onnx_op_test(test_onnx_op_concat_2d_axis_1 op/test_onnx_op_concat_2d_axis_1.cpp)
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */
#include "test_onnx_op.h"

std::string node = "test_clip_example";

std::string input_pb_0 = "../onnx_node/" + node + "/test_data_set_0/input_0.pb";
std::string input_pb_1 = "../onnx_node/" + node + "/test_data_set_0/input_1.pb";
std::string input_pb_2 = "../onnx_node/" + node + "/test_data_set_0/input_2.pb";
std::string output_pb = "../onnx_node/" + node + "/test_data_set_0/output_0.pb";
std::string model = "../onnx_node/" + node + "/onnx.tmfile";

int main(int argc, char* argv[])
{
    int w_0 = 3;
    int w_1 = 1;
    int w_2 = 1;

    /* set runtime options */
    struct options opt;
    opt.num_thread = 1;
    opt.cluster = TENGINE_CLUSTER_ALL;
    opt.precision = TENGINE_MODE_FP32;
    opt.affinity = 0;

    /* initial tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initial tengine failed.\n");
        return -1;
    }

    /* create graph, load tengine model xxx.tmfile */
    graph_t graph = create_graph(nullptr, "tengine", model.c_str());
    if (nullptr == graph)
    {
        fprintf(stderr, "Create graph failed.\n");
        return -1;
    }

    /* set the shape, data buffer of input_tensor of the graph */
    /* input 0 */
    int input_size_0 = w_0;
    int dims_0[] = {w_0};
    std::vector<float> feature_in_0(input_size_0);

    tensor_t input_tensor_0 = get_graph_input_tensor(graph, 0, 0);
    if (input_tensor_0 == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }

    if (set_tensor_shape(input_tensor_0, dims_0, 1) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }

    if (set_tensor_buffer(input_tensor_0, feature_in_0.data(), input_size_0 * 4) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }

    /* input 1 */
    int input_size_1 = w_1;
    int dims_1[] = {w_1};
    std::vector<float> feature_in_1(input_size_1);

    tensor_t input_tensor_1 = get_graph_input_tensor(graph, 1, 0);
    if (input_tensor_1 == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }

    if (set_tensor_shape(input_tensor_1, dims_1, 1) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }

    if (set_tensor_buffer(input_tensor_1, feature_in_1.data(), input_size_1 * 4) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }

    /* input 2 */
    int input_size_2 = w_2;
    int dims_2[] = {w_2};
    std::vector<float> feature_in_2(input_size_2);

    tensor_t input_tensor_2 = get_graph_input_tensor(graph, 2, 0);
    if (input_tensor_2 == nullptr)
    {
        fprintf(stderr, "Get input tensor failed\n");
        return -1;
    }

    if (set_tensor_shape(input_tensor_2, dims_2, 1) < 0)
    {
        fprintf(stderr, "Set input tensor shape failed\n");
        return -1;
    }

    if (set_tensor_buffer(input_tensor_2, feature_in_2.data(), input_size_2 * 4) < 0)
    {
        fprintf(stderr, "Set input tensor buffer failed\n");
        return -1;
    }

    /* prepare input data, set the data mem to input tensor */
    get_pb_data(feature_in_0.data(), input_pb_0);
    get_pb_data(feature_in_1.data(), input_pb_1);
    get_pb_data(feature_in_2.data(), input_pb_2);

    /* prerun graph, set work options(num_thread, cluster, precision) */
    if (prerun_graph_multithread(graph, opt) < 0)
    {
        fprintf(stderr, "Prerun multithread graph failed.\n");
        return -1;
    }

    /* run graph */
    if (run_graph(graph, 1) < 0)
    {
        fprintf(stderr, "Run graph failed\n");
        return -1;
    }

    /* get the current result of inference */
    tensor_t output_tensor = get_graph_output_tensor(graph, 0, 0);
    float* output_data = ( float* )get_tensor_buffer(output_tensor);
    int output_size = get_tensor_buffer_size(output_tensor) / sizeof(float);

    /* get the reference result of inference */
    std::vector<float> reference_out(output_size);
    get_pb_data(reference_out.data(), output_pb);

    /* check the result */
    int ret = float_mismatch(output_data, reference_out.data(), output_size);

    /* release tengine */
    postrun_graph(graph);
    destroy_graph(graph);
    release_tengine();

    return ret;
}
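float_mismatch comes from test_onnx_op.h and its body is not part of this diff, so the tolerance below is an assumption. A plausible sketch of such a checker, comparing element-wise with an absolute tolerance (helper name and threshold value hypothetical):

#include <math.h>
#include <stdio.h>

/* hypothetical checker: returns 0 if every element of 'current' is within
 * 'eps' of 'reference', -1 on the first mismatch */
static int float_mismatch_sketch(const float* current, const float* reference, int size)
{
    const float eps = 0.0001f; /* assumed tolerance */
    for (int i = 0; i < size; i++)
    {
        if (fabsf(current[i] - reference[i]) > eps)
        {
            fprintf(stderr, "mismatch at %d: %f vs %f\n", i, current[i], reference[i]);
            return -1;
        }
    }
    fprintf(stderr, "test pass\n");
    return 0;
}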