提交 15d74a8d 编写于 作者: 李寅

Merge branch 'depthwise' into 'master'

update depthwise test

See merge request !738
...@@ -373,10 +373,10 @@ struct PoolingFunctor<DeviceType::CPU, uint8_t>: PoolingFunctorBase {
MACE_UNUSED(future); MACE_UNUSED(future);
MACE_CHECK(dilations_[0] == 1 && dilations_[1] == 1, MACE_CHECK(dilations_[0] == 1 && dilations_[1] == 1,
"Quantized pooling does not support dilation > 1 yet."); "Quantized pooling does not support dilation > 1 yet.");
MACE_CHECK(input_tensor->scale() == output_tensor->scale(), // Use the same scale and zero point with input and output.
"Quantized pooling's input and output scale are not equal."); output_tensor->SetScale(input_tensor->scale());
MACE_CHECK(input_tensor->zero_point() == output_tensor->zero_point(), output_tensor->SetZeroPoint(input_tensor->zero_point());
"Quantized pooling's input and output zero_point are not equal");
std::vector<index_t> output_shape(4); std::vector<index_t> output_shape(4);
std::vector<index_t> filter_shape = { std::vector<index_t> filter_shape = {
input_tensor->dim(3), kernels_[0], kernels_[1], input_tensor->dim(3)}; input_tensor->dim(3), kernels_[0], kernels_[1], input_tensor->dim(3)};
......
...@@ -497,11 +497,11 @@ void TestQuant(const index_t batch,
TEST_F(DepthwiseConv2dOpTest, Quant) { TEST_F(DepthwiseConv2dOpTest, Quant) {
QuantSimpleValidTest(); QuantSimpleValidTest();
TestQuant(1, 1, 2, 3, 3, 3, 3, VALID, {1, 1}); TestQuant(1, 1, 1024, 7, 7, 3, 3, VALID, {1, 1});
TestQuant(1, 1, 2, 3, 3, 3, 3, SAME, {1, 1}); TestQuant(1, 1, 1024, 7, 7, 3, 3, SAME, {1, 1});
TestQuant(1, 1, 2, 3, 3, 3, 3, FULL, {1, 1}); TestQuant(1, 1, 1024, 7, 7, 3, 3, FULL, {1, 1});
TestQuant(1, 2, 2, 3, 3, 3, 3, SAME, {1, 1}); TestQuant(1, 2, 1024, 7, 7, 3, 3, SAME, {1, 1});
TestQuant(1, 2, 2, 3, 3, 3, 3, SAME, {2, 2}); TestQuant(1, 2, 1024, 7, 7, 3, 3, SAME, {2, 2});
TestQuant(1, 1, 512, 14, 14, 3, 3, SAME, {1, 1}); TestQuant(1, 1, 512, 14, 14, 3, 3, SAME, {1, 1});
TestQuant(1, 1, 512, 14, 13, 5, 5, SAME, {2, 2}); TestQuant(1, 1, 512, 14, 13, 5, 5, SAME, {2, 2});
TestQuant(1, 1, 256, 28, 28, 3, 3, SAME, {1, 1}); TestQuant(1, 1, 256, 28, 28, 3, 3, SAME, {1, 1});
......
...@@ -603,12 +603,7 @@ void TestQuant(const index_t batch,
.AddIntArg("pooling_type", pooling) .AddIntArg("pooling_type", pooling)
.AddIntArg("T", DT_UINT8) .AddIntArg("T", DT_UINT8)
.Finalize(net.NewOperatorDef()); .Finalize(net.NewOperatorDef());
net.Setup(DeviceType::CPU); net.RunOp();
Tensor *q_input = net.GetTensor("QuantizedInput");
Tensor *q_output = net.GetTensor("QuantizedOutput");
q_output->SetScale(q_input->scale());
q_output->SetZeroPoint(q_input->zero_point());
net.Run();
OpDefBuilder("Dequantize", "DeQuantizeTest") OpDefBuilder("Dequantize", "DeQuantizeTest")
.Input("QuantizedOutput") .Input("QuantizedOutput")
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册