提交 5dea0629 编写于 作者: L lixian

Optimization for int8 convolution: replace the per-element C4 channel-block copy in Im2ColPackUnitInt8Opt with a single memcpy, and route the 3x3 and 1x1 cases in CpuConvInt8KernelCreator to the generic ConvolutionInt8CPUKernel.

上级 fa50dd02
......@@ -487,9 +487,7 @@ void Im2ColPackUnitInt8Opt(const int8_t *input_data, int8_t *packed_input, int r
for (int m = 0; m < ic4; m++) {
int channel_block_stride = input_x_stride + m * C4NUM;
int channel_block_offset = input_plane_offset + m * tile_num * C4NUM;
for (int k = 0; k < C4NUM; k++) {
(packed_input + channel_block_offset)[k] = (input_data + channel_block_stride)[k];
}
memcpy(packed_input + channel_block_offset, input_data + channel_block_stride, 4);
} // channel_block loop
} // kernel_w loop
} // kernel_h loop
......
......@@ -68,7 +68,6 @@ void Convolution1x1Int8CPUKernel::CheckSupportOptimize() {
matmul_func_ = nullptr;
} else {
support_optimize_ = true;
matmul_func_ = MatMulInt8_8x8_r;
}
} else {
support_optimize_ = false;
......
......@@ -400,9 +400,9 @@ kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector<lite::tensor::Ten
kernel::LiteKernel *kernel;
auto filter_quant_size = inputs[kWeightIndex]->GetQuantParams().size();
if (kernel_h == 3 && kernel_w == 3 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1) {
kernel = new (std::nothrow) kernel::Convolution3x3Int8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
} else if (kernel_h == 1 && kernel_w == 1 && filter_quant_size == 1) {
kernel = new (std::nothrow) kernel::Convolution1x1Int8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
} else {
kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册