// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/backends/npu/builder.h"
#include "lite/kernels/npu/bridges/registry.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridges {

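// Converts a conv2d / depthwise_conv2d op into HiAI IR nodes: the input,
// filter and optional bias tensors are looked up in the scope and mapped to a
// ge::op::Convolution or ge::op::ConvolutionDepthwise node (plus Add and
// Activation nodes for bias and fused relu), and the resulting output node is
// returned in the node map.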
node_map_type ConvConverter(const std::shared_ptr<lite::OpLite> conv_op,
                            const node_map_type& inputs_map) {
  auto scope = conv_op->scope();
  auto op_info = conv_op->op_info();
  auto op_type = op_info->Type();
  auto unique_op_type = lite::npu::UniqueName(op_type);
  LOG(INFO) << "[NPU] Converting " << op_type << "... ";

  // get input, output, filter tensors and op attributes
  auto input_var_name = op_info->Input("Input").front();
  auto input = scope->FindVar(input_var_name)->GetMutable<lite::Tensor>();
  auto input_dims = input->dims();
  auto output_var_name = op_info->Output("Output").front();
  auto output = scope->FindVar(output_var_name)->GetMutable<lite::Tensor>();
  auto output_dims = output->dims();
  auto filter_var_name = op_info->Input("Filter").front();
  auto filter = scope->FindVar(filter_var_name)->GetMutable<lite::Tensor>();
  auto filter_dims = filter->dims();
  auto bs = input_dims[0];
  auto ic = input_dims[1];
  auto oc = filter_dims[0];
  CHECK_EQ(input_dims.size(), 4);
  CHECK_EQ(output_dims.size(), 4);
  CHECK_EQ(filter_dims.size(), 4);
  CHECK_EQ(output_dims[0], bs);
  CHECK_EQ(output_dims[1], oc);
  auto strides = op_info->GetAttr<std::vector<int>>("strides");
  auto paddings = op_info->GetAttr<std::vector<int>>("paddings");
  auto groups = op_info->GetAttr<int>("groups");
  auto dilations = op_info->GetAttr<std::vector<int>>("dilations");
  auto fuse_relu = op_info->GetAttr<bool>("fuse_relu");
  CHECK_EQ(strides.size(), 2);
  CHECK_EQ(paddings.size(), 2);
  CHECK_EQ(dilations.size(), 2);

  // check depthwise mode, and decide whether to use ConvolutionDepthwise Op
  bool use_depthwise_conv =
      false;  // whether to use ge::op::ConvolutionDepthwise
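  // depthwise mode: one input channel and one output channel per group,
  // i.e. ic == oc == groups (channel multiplier of 1)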
  bool is_depthwise_mode = ic == groups && oc == groups;
  if (is_depthwise_mode &&
      !((groups == 1 || groups >= 5) && dilations[0] == 1 &&
        dilations[1] == 1)) {
    use_depthwise_conv = true;
    LOG(WARNING) << "[NPU] For depthwise mode, dilation = 1 and groups >= 5 "
                    "(or groups = 1) is only supported in Convolution Op, so "
                    "force to use ConvolutionDepthwise Op, but may lead poor "
                    "performance.";
  }

  // check input
  CHECK(inputs_map.count(input_var_name));
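  // OpList::Global() keeps a reference to every node used by this converter
  // so that it is not released before the HiAI IR graph is built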
  lite::npu::OpList::Global().add(inputs_map.at(input_var_name));

  // create filter node
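  // the filter comes from a persistable weight tensor rather than another
  // node's output, so it is wrapped in a ge::op::Const node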
  CHECK(!inputs_map.count(filter_var_name));
  auto filter_const_node = std::make_shared<ge::op::Const>(filter_var_name);
  filter_const_node->set_attr_value(lite::npu::CvtTensor(filter));
  lite::npu::OpList::Global().add(filter_const_node);

  // create bias node if the op has a bias input
  // supported bias dimensions:
  // 0: {oc}
  // 1: {1, oc, oh, ow}
  // 2: {n, oc, oh, ow}
  std::shared_ptr<ge::Operator> bias_node = nullptr;
  bool is_channel_bias = false;
  if (lite::npu::HasInputArg(op_info, scope, "Bias")) {
    auto bias_var_name = op_info->Input("Bias").front();
    auto* bias = scope->FindVar(bias_var_name)->GetMutable<lite::Tensor>();
    auto bias_dims = bias->dims();
    auto bias_data_size = bias_dims.production();
    auto output_data_size = output_dims.production();
    std::vector<int64_t> bias_shape;
    if (bias_data_size == oc) {
      // 0: {oc}
      bias_shape = {1, oc, 1, 1};
      is_channel_bias = true;
    } else if (bias_data_size == output_data_size / bs) {
      // 1: {1, oc, oh, ow}
      bias_shape = {1, output_dims[1], output_dims[2], output_dims[3]};
    } else if (bias_data_size == output_data_size) {
      // 2: {n, oc, oh, ow}
      bias_shape = output_dims.Vectorize();
    } else {
      LOG(ERROR) << "bias dimension " << bias_dims
                 << " isn't supported in conv2d Op when output dimension is "
                 << output_dims;
    }
    if (inputs_map.count(bias_var_name)) {
      // bias node from input map
      bias_node = inputs_map.at(bias_var_name);
    } else {
      // bias node with const data
      auto bias_const_node = std::make_shared<ge::op::Const>(bias_var_name);
      bias_const_node->set_attr_value(lite::npu::CvtTensor(bias, bias_shape));
      bias_node = bias_const_node;
    }
    lite::npu::OpList::Global().add(bias_node);
  }

  // create conv node and set input, filter, bias nodes and attributes
  std::shared_ptr<ge::Operator> conv_node = nullptr;
  if (use_depthwise_conv && is_depthwise_mode) {
    auto depthwise_conv_node =
        std::make_shared<ge::op::ConvolutionDepthwise>(unique_op_type);
    depthwise_conv_node->set_input_x(*inputs_map.at(input_var_name));
    depthwise_conv_node->set_input_filter(*filter_const_node);
    depthwise_conv_node->set_attr_mode(1);
    depthwise_conv_node->set_attr_algo(0);
    depthwise_conv_node->set_attr_format(0);    // NCHW
    depthwise_conv_node->set_attr_pad_mode(5);  // VALID
    depthwise_conv_node->set_attr_group(groups);
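    // expand the symmetric {pad_h, pad_w} paddings into the 4-element
    // {top, bottom, left, right} form expected by the pad attribute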
    depthwise_conv_node->set_attr_pad(ge::AttrValue::LIST_INT(
        {paddings[0], paddings[0], paddings[1], paddings[1]}));
    depthwise_conv_node->set_attr_dilation(
        ge::AttrValue::LIST_INT({dilations[0], dilations[1]}));
    depthwise_conv_node->set_attr_stride(
        ge::AttrValue::LIST_INT({strides[0], strides[1]}));
    depthwise_conv_node->set_attr_kernel(
        ge::AttrValue::LIST_INT({filter_dims[2], filter_dims[3]}));
    lite::npu::OpList::Global().add(depthwise_conv_node);
    conv_node = depthwise_conv_node;
    // ConvolutionDepthwise Op doesn't support bias, so append an Add node to
    // apply the bias
    if (bias_node != nullptr) {
      auto add_node = std::make_shared<ge::op::Add>(unique_op_type + "/add");
      add_node->set_input_x1(*depthwise_conv_node);
      add_node->set_input_x2(*bias_node);
      lite::npu::OpList::Global().add(add_node);
      conv_node = add_node;
    }
  } else {
    auto common_conv_node =
        std::make_shared<ge::op::Convolution>(unique_op_type);
    common_conv_node->set_input_x(*inputs_map.at(input_var_name));
    common_conv_node->set_input_w(*filter_const_node);
    common_conv_node->set_attr_mode(1);
    common_conv_node->set_attr_pad_mode(0);  // NOTSET
    common_conv_node->set_attr_group(groups);
    common_conv_node->set_attr_pad(ge::AttrValue::LIST_INT(
        {paddings[0], paddings[0], paddings[1], paddings[1]}));
    common_conv_node->set_attr_dilation(
        ge::AttrValue::LIST_INT({dilations[0], dilations[1]}));
    common_conv_node->set_attr_stride(
        ge::AttrValue::LIST_INT({strides[0], strides[1]}));
    common_conv_node->set_attr_kernel(
        ge::AttrValue::LIST_INT({filter_dims[2], filter_dims[3]}));
    lite::npu::OpList::Global().add(common_conv_node);
    conv_node = common_conv_node;
    // Convolution Op only supports bias with dimension {1, oc, 1, 1}, so
    // append an Add node if it is {1, oc, oh, ow} or {n, oc, oh, ow}
    if (bias_node != nullptr) {
      if (is_channel_bias) {
        common_conv_node->set_input_b(*bias_node);
      } else {
        auto add_node = std::make_shared<ge::op::Add>(unique_op_type + "/add");
        add_node->set_input_x1(*common_conv_node);
        add_node->set_input_x2(*bias_node);
        lite::npu::OpList::Global().add(add_node);
        conv_node = add_node;
      }
    }
  }
  CHECK(conv_node);

  node_map_type outputs_map;
  if (fuse_relu) {
    // append relu node if fuse_relu is true
    auto relu_node =
        std::make_shared<ge::op::Activation>(unique_op_type + "/relu");
    relu_node->set_input_x(*conv_node);
    relu_node->set_attr_mode(lite::npu::CvtActMode("relu"));
    lite::npu::OpList::Global().add(relu_node);
    outputs_map[op_info->Output("Output").front()] = relu_node;
  } else {
    outputs_map[op_info->Output("Output").front()] = conv_node;
  }
  return outputs_map;
}

}  // namespace bridges
}  // namespace npu
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

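// The same converter handles both conv2d and depthwise_conv2d; the depthwise
// case is detected from the op attributes at conversion time.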
REGISTER_NPU_BRIDGE(conv2d, paddle::lite::kernels::npu::bridges::ConvConverter);
REGISTER_NPU_BRIDGE(depthwise_conv2d,
                    paddle::lite::kernels::npu::bridges::ConvConverter);