// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/kernels/arm/conv_compute.h"
#include <utility>
#include "lite/core/op_registry.h"
#include "lite/core/type_system.h"
#include "lite/kernels/arm/conv_depthwise.h"
#include "lite/kernels/arm/conv_direct.h"
#include "lite/kernels/arm/conv_gemmlike.h"
#include "lite/kernels/arm/conv_winograd.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace arm {

template <>
void ConvCompute<PRECISION(kFloat), PRECISION(kFloat)>::PrepareForRun() {
  auto& param = this->Param<param_t>();
  auto w_dims = param.filter->dims();
  auto& ctx = this->ctx_->template As<ARMContext>();

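  // paddings has four entries, compared below as the {0,1} and {2,3} pairs;
  // assuming Paddle Lite's {top, bottom, left, right} layout, paddings[0]
  // is the height pad and paddings[2] the width pad.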
  auto paddings = *param.paddings;
  auto dilations = *param.dilations;
  int ic = w_dims[1] * param.groups;
  int oc = w_dims[0];
  int kh = w_dims[2];  // oihw
  int kw = w_dims[3];
  int pad = paddings[0];
  int stride = param.strides[0];
  int threads = ctx.threads();  // currently unused by the selection below

  bool pads_equal =
      ((paddings[0] == paddings[1]) && (paddings[2] == paddings[3]));
  int chin = param.x->dims()[1];
  int hin = param.x->dims()[2];
  int win = param.x->dims()[3];
  int chout = param.output->dims()[1];
  int hout = param.output->dims()[2];
  int wout = param.output->dims()[3];

  bool pads_all_equal = (pads_equal && paddings[0] == paddings[2]);

  bool kps_equal = (param.strides[0] == param.strides[1]) && (kw == kh);
  bool no_dilation = (dilations[0] == 1) && (dilations[1] == 1);
  bool flag_dw_3x3 = (kw == 3 && kh == 3 && (stride == 1 || stride == 2));
  bool flag_dw_5x5 = pads_all_equal && ((kw == 5 && stride == 1) ||
                                        (kw == 5 && stride == 2 && pad == 2));
  bool flag_dw = flag_dw_3x3 || flag_dw_5x5;

  /// select conv impl
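  // Depthwise needs groups == ic == oc (one filter per input channel),
  // square kernel/stride, symmetric paddings, no dilation, and a 3x3 or
  // 5x5 kernel (see flag_dw above). A MobileNet-style 3x3 stride-1 pad-1
  // depthwise layer lands here; a 1x1 pointwise conv matches no
  // specialized branch and falls through to GemmLikeConv.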
  if (param.groups == ic && ic == oc && kps_equal && pads_equal &&
      no_dilation && flag_dw) {
    /// dw conv impl
    impl_ = new DepthwiseConv<PRECISION(kFloat), PRECISION(kFloat)>;
    // VLOG(3) << "invoking dw conv";
  } else if (param.groups == 1 && kw == 3 && stride == 1 && kps_equal &&
             no_dilation && pads_all_equal) {
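    // Winograd covers single-group 3x3 stride-1 convolutions, trading
    // extra input/output transforms for fewer multiplies.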
    /// winograd conv impl
    impl_ = new WinogradConv<PRECISION(kFloat), PRECISION(kFloat)>;
    // VLOG(3) << "invoking winograd conv";
  } else if (param.groups == 1 && kw == 3 && stride == 2 &&
             chin * chout < 4 * hin * win && kps_equal && no_dilation) {
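    // Direct conv covers single-group 3x3 stride-2; the chin * chout <
    // 4 * hin * win check limits it to layers whose channel product is
    // small relative to their spatial size.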
    /// direct conv impl
    impl_ = new DirectConv<PRECISION(kFloat), PRECISION(kFloat)>;
    // VLOG(3) << "invoking direct conv";
  } else {
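    // General fallback: handles every remaining configuration (grouped,
    // dilated, asymmetric pads, arbitrary kernel sizes and strides).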
    impl_ = new GemmLikeConv<PRECISION(kFloat), PRECISION(kFloat)>;
    // VLOG(3) << "invoking gemm like conv";
  }
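  // Hand this kernel's context over to the chosen implementation (note
  // the std::move: this->ctx_ is no longer usable here) and let it run
  // its own PrepareForRun.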
  impl_->SetContext(std::move(this->ctx_));
  impl_->SetParam(param);
  impl_->PrepareForRun();
  is_first_epoch_ = false;
}

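// Int8 input with float output. Selection mirrors the float kernel, except
// there is no Winograd path and direct conv accepts stride 1 or 2 for 3x3
// without the channel-product check.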
template <>
void ConvCompute<PRECISION(kInt8), PRECISION(kFloat)>::PrepareForRun() {
  auto& param = this->Param<param_t>();
  auto w_dims = param.filter->dims();

  auto& ctx = this->ctx_->template As<ARMContext>();

  auto paddings = *param.paddings;
  auto dilations = *param.dilations;
  bool pads_equal =
      ((paddings[0] == paddings[1]) && (paddings[2] == paddings[3]));
  int ic = param.groups * w_dims[1];
  int oc = w_dims[0];
  int kh = w_dims[2];  // oihw
  int kw = w_dims[3];
  int ph = paddings[0];
  int pw = paddings[2];
  int sh = param.strides[0];  // strides are {stride_h, stride_w}
  int sw = param.strides[1];
  bool pads_all_equal = (pads_equal && paddings[0] == paddings[2]);

  bool kps_equal = (pw == ph) && (sh == sw) && (kw == kh);
  bool no_dilation = (dilations[0] == 1) && (dilations[1] == 1);
  bool flag_dw_3x3 = (kw == 3 && kh == 3 && (sw == 1 || sw == 2));
  bool flag_dw_5x5 = pads_all_equal &&
                     ((kw == 5 && sw == 1) || (kw == 5 && sw == 2 && pw == 2));
  bool flag_dw = flag_dw_3x3 || flag_dw_5x5;

  if (param.groups == ic && ic == oc && kps_equal && pads_equal &&
      no_dilation && flag_dw) {
    impl_ = new DepthwiseConv<PRECISION(kInt8), PRECISION(kFloat)>;
    // VLOG(3) << "Run DepthwiseConv Int8";
  } else if (param.groups == 1 && kw == 3 && (sw == 1 || sw == 2) &&
             kps_equal && no_dilation) {
    impl_ = new DirectConv<PRECISION(kInt8), PRECISION(kFloat)>;
    // VLOG(3) << "Run DirectConv Int8";
  } else {
    impl_ = new GemmLikeConv<PRECISION(kInt8), PRECISION(kFloat)>;
    // VLOG(3) << "Run GemmLikeConvInt8";
  }
  impl_->SetContext(std::move(this->ctx_));
  impl_->SetParam(param);
  impl_->PrepareForRun();
  is_first_epoch_ = false;
}

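// Int8 input with int8 output; implementation selection is identical to the
// int8-to-float variant above.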
template <>
void ConvCompute<PRECISION(kInt8), PRECISION(kInt8)>::PrepareForRun() {
  auto& param = this->Param<param_t>();
  auto w_dims = param.filter->dims();

  auto& ctx = this->ctx_->template As<ARMContext>();
  auto paddings = *param.paddings;
  auto dilations = *param.dilations;
  bool pads_equal =
      ((paddings[0] == paddings[1]) && (paddings[2] == paddings[3]));

  int ic = w_dims[1] * param.groups;
  int oc = w_dims[0];
  int kh = w_dims[2];  // oihw
  int kw = w_dims[3];
  int ph = paddings[0];
  int pw = paddings[2];
  int sh = param.strides[0];  // strides are {stride_h, stride_w}
  int sw = param.strides[1];
  bool pads_all_equal = (pads_equal && paddings[0] == paddings[2]);

  bool kps_equal = (pw == ph) && (sh == sw) && (kw == kh);
  bool no_dilation = (dilations[0] == 1) && (dilations[1] == 1);
  bool flag_dw_3x3 = (kw == 3 && kh == 3 && (sw == 1 || sw == 2));
  bool flag_dw_5x5 = pads_all_equal &&
                     ((kw == 5 && sw == 1) || (kw == 5 && sw == 2 && pw == 2));
  bool flag_dw = flag_dw_3x3 || flag_dw_5x5;

  if (param.groups == ic && ic == oc && kps_equal && pads_equal &&
      no_dilation && flag_dw) {
    impl_ = new DepthwiseConv<PRECISION(kInt8), PRECISION(kInt8)>;
    // VLOG(3) << "Run DepthwiseConv Int8";
  } else if (param.groups == 1 && kw == 3 && (sw == 1 || sw == 2) &&
             kps_equal && no_dilation) {
    impl_ = new DirectConv<PRECISION(kInt8), PRECISION(kInt8)>;
    // VLOG(3) << "Run DirectConv Int8";
  } else {
    impl_ = new GemmLikeConv<PRECISION(kInt8), PRECISION(kInt8)>;
    // VLOG(3) << "Run GemmLikeConvInt8";
  }
  impl_->SetContext(std::move(this->ctx_));
  impl_->SetParam(param);
  impl_->PrepareForRun();
  is_first_epoch_ = false;
}

}  // namespace arm
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

typedef paddle::lite::kernels::arm::ConvCompute<PRECISION(kFloat),
                                                PRECISION(kFloat)>
    ConvFp32;
typedef paddle::lite::kernels::arm::ConvCompute<PRECISION(kInt8),
                                                PRECISION(kFloat)>
    ConvInt8_Fp32;
typedef paddle::lite::kernels::arm::ConvCompute<PRECISION(kInt8),
                                                PRECISION(kInt8)>
    ConvInt8_Int8;
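
// Each compute is registered for both conv2d and depthwise_conv2d; the int8
// computes are registered twice per op, as an int8_out and an fp32_out
// variant distinguished by the output tensor precision.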

REGISTER_LITE_KERNEL(conv2d, kARM, kFloat, kNCHW, ConvFp32, def)
    .BindInput("Input", {LiteType::GetTensorTy(TARGET(kARM))})
    .BindInput("Bias", {LiteType::GetTensorTy(TARGET(kARM))})
    .BindInput("Filter", {LiteType::GetTensorTy(TARGET(kARM))})
    .BindOutput("Output", {LiteType::GetTensorTy(TARGET(kARM))})
    .Finalize();

REGISTER_LITE_KERNEL(depthwise_conv2d, kARM, kFloat, kNCHW, ConvFp32, def)
    .BindInput("Input", {LiteType::GetTensorTy(TARGET(kARM))})
    .BindInput("Bias", {LiteType::GetTensorTy(TARGET(kARM))})
    .BindInput("Filter", {LiteType::GetTensorTy(TARGET(kARM))})
    .BindOutput("Output", {LiteType::GetTensorTy(TARGET(kARM))})
    .Finalize();

REGISTER_LITE_KERNEL(conv2d, kARM, kInt8, kNCHW, ConvInt8_Int8, int8_out)
    .BindInput("Input", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .BindInput("Bias", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))})
    .BindInput("Filter",
               {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .BindOutput("Output",
                {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .Finalize();

REGISTER_LITE_KERNEL(conv2d, kARM, kInt8, kNCHW, ConvInt8_Fp32, fp32_out)
    .BindInput("Input", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .BindInput("Bias", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))})
    .BindInput("Filter",
               {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .BindOutput("Output",
                {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))})
    .Finalize();

REGISTER_LITE_KERNEL(
    depthwise_conv2d, kARM, kInt8, kNCHW, ConvInt8_Int8, int8_out)
    .BindInput("Input", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .BindInput("Bias", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))})
    .BindInput("Filter",
               {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .BindOutput("Output",
                {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .Finalize();

REGISTER_LITE_KERNEL(
    depthwise_conv2d, kARM, kInt8, kNCHW, ConvInt8_Fp32, fp32_out)
    .BindInput("Input", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .BindInput("Bias", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))})
    .BindInput("Filter",
               {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt8))})
    .BindOutput("Output",
                {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kFloat))})
    .Finalize();