Commit 903f399d authored by 李寅

Merge branch 'feature_wuch' into 'master'

fix dsp op test

See merge request !133
@@ -5,6 +5,7 @@
 #include "mace/dsp/hexagon_control_wrapper.h"
 #include "gtest/gtest.h"
+#define RESIZE_BILINEAR_TEST_CHANNELS 128
 using namespace mace;
 static NetDef BuildNetDef() {
@@ -17,7 +18,7 @@ static NetDef BuildNetDef() {
   input_op->set_type("INPUT");
   input_op->set_node_id(0);
   input_op->set_padding(0);
-  input_op->add_out_max_byte_size(1000);
+  input_op->add_out_max_byte_size(1200);
   // relu op
   OperatorDef *resize_bilinear_op = net.add_op();
@@ -45,7 +46,7 @@ static NetDef BuildNetDef() {
   input_node_input = resize_bilinear_op->add_node_input();
   input_node_input->set_node_id(12);
   input_node_input->set_output_port(0);
-  resize_bilinear_op->add_out_max_byte_size(1000);
+  resize_bilinear_op->add_out_max_byte_size(1200);
   resize_bilinear_op->add_out_max_byte_size(1000);
   resize_bilinear_op->add_out_max_byte_size(1000);
@@ -64,8 +65,8 @@ static NetDef BuildNetDef() {
   new_dim_tensor->add_dims(2);
   new_dim_tensor->set_data_type(DataType::DT_INT32);
   new_dim_tensor->set_node_id(10);
-  new_dim_tensor->add_int32_data(1);
-  new_dim_tensor->add_int32_data(1);
+  new_dim_tensor->add_int32_data(2);
+  new_dim_tensor->add_int32_data(2);
   TensorProto *input_min_tensor = net.add_tensors();
   input_min_tensor->set_name("input_min");
@@ -86,20 +87,20 @@ static NetDef BuildNetDef() {
   input_info->set_name("input_node");
   input_info->set_node_id(0);
   input_info->add_dims(1);
-  input_info->add_dims(2);
-  input_info->add_dims(2);
-  input_info->add_dims(128);
+  input_info->add_dims(3);
+  input_info->add_dims(3);
+  input_info->add_dims(RESIZE_BILINEAR_TEST_CHANNELS);
   input_info->set_data_type(DataType::DT_UINT8);
-  input_info->set_max_byte_size(1000);
+  input_info->set_max_byte_size(1200);
   OutputInfo *output_info = net.add_output_info();
   output_info->set_name("output_node");
   output_info->set_node_id(1);
   output_info->add_dims(1);
-  output_info->add_dims(1);
-  output_info->add_dims(1);
-  output_info->add_dims(128);
+  output_info->add_dims(2);
+  output_info->add_dims(2);
+  output_info->add_dims(RESIZE_BILINEAR_TEST_CHANNELS);
   output_info->set_data_type(DataType::DT_UINT8);
-  output_info->set_max_byte_size(1000);
+  output_info->set_max_byte_size(1200);
   return net;
 }
@@ -117,21 +118,25 @@ TEST(QuantizedResizeBilinearTest, QuantizedResizeBilinear) {
   Allocator *cpu_allocator = GetDeviceAllocator(DeviceType::CPU);
   Tensor input_tensor(cpu_allocator, DT_UINT8);
   Tensor output_tensor(cpu_allocator, DT_UINT8);
-  input_tensor.Resize({1, 2, 2, 128});
-  output_tensor.Resize({1, 1, 1, 128});
+  input_tensor.Resize({1, 3, 3, RESIZE_BILINEAR_TEST_CHANNELS});
+  output_tensor.Resize({1, 2, 2, RESIZE_BILINEAR_TEST_CHANNELS});
   uint8_t *input_data = input_tensor.mutable_data<uint8_t>();
   const uint8_t *output_data = output_tensor.data<uint8_t>();
-  for (int c = 0; c < 128; ++c) {
-    input_data[c] = input_data[c + 128] = input_data[c + 256]
-        = input_data[c + 384] = (uint8_t)c;
+  for (int wh = 0; wh < 9; ++wh) {
+    for (int c = 0; c < RESIZE_BILINEAR_TEST_CHANNELS; ++c) {
+      input_data[wh * RESIZE_BILINEAR_TEST_CHANNELS + c] = 9 - wh;
+    }
   }
   VLOG(0) << wrapper.ExecuteGraph(input_tensor, &output_tensor);
   wrapper.PrintLog();
-  for (int i = 0; i < output_tensor.size(); ++i) {
-    EXPECT_EQ(i, output_data[i]);
+  vector<uint8_t> expected {9, 8, 5, 3};
+  for (int i = 0; i < 4; ++i) {
+    for (int c = 0; c < RESIZE_BILINEAR_TEST_CHANNELS; ++c)
+      EXPECT_EQ(expected[i],
+                output_data[i * RESIZE_BILINEAR_TEST_CHANNELS + c]);
   }
   std::cout << std::endl;
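Note on the new expected values {9, 8, 5, 3}: each channel of the 3x3 input holds 9, 8, 7, 6, 5, 4, 3, 2, 1 in row-major spatial order, and a 2x2 bilinear resize samples the source at dst * (3/2). The standalone sketch below (an illustration, not MACE code; it assumes align_corners=false coordinate mapping and round-to-nearest quantized output, since those are what reproduce the asserted values) computes one channel by hand:

// Standalone sketch reproducing the test's expected values {9, 8, 5, 3}.
// Assumptions (not taken from the MACE sources): src = dst * in/out
// coordinate mapping (align_corners=false) and round-to-nearest output.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  // One channel of the 3x3 input: input_data[wh * C + c] = 9 - wh.
  const float in[3][3] = {{9, 8, 7}, {6, 5, 4}, {3, 2, 1}};
  const float scale = 3.0f / 2.0f;  // input size / output size
  for (int y = 0; y < 2; ++y) {
    for (int x = 0; x < 2; ++x) {
      const float sy = y * scale, sx = x * scale;
      const int y0 = static_cast<int>(sy), x0 = static_cast<int>(sx);
      const int y1 = std::min(y0 + 1, 2), x1 = std::min(x0 + 1, 2);
      const float fy = sy - y0, fx = sx - x0;
      const float top = in[y0][x0] * (1 - fx) + in[y0][x1] * fx;
      const float bot = in[y1][x0] * (1 - fx) + in[y1][x1] * fx;
      // Interpolated values: (0,0)->9.0, (0,1)->7.5, (1,0)->4.5, (1,1)->3.0
      std::printf("%ld ", std::lround(top * (1 - fy) + bot * fy));
    }
  }
  std::printf("\n");  // prints: 9 8 5 3
  return 0;
}

The half values 7.5 and 4.5 make the asserted 8 and 5 sensitive to the rounding mode; truncation would instead yield {9, 7, 4, 3}.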
@@ -224,7 +224,7 @@ TEST(SupernodeTest, Supernode) {
     input_data[h * 4 + w] = (uint8_t)((h == 0) ? 0 : h * 64 - 1);
   }
-  VLOG(0) << wrapper.ExecuteGraphNew(&input_tensor, 1, &output_tensor, 1);
+  VLOG(0) << wrapper.ExecuteGraph(input_tensor, &output_tensor);
   wrapper.PrintLog();
   // expect out: [[49.2095, 49.2095], [50.7905, 50.7905]]