/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/layout_utils.h"
#include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
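// Kernel-variant tags and maximum cache-key length, presumably consumed by
// the MKL-DNN (oneDNN) convolution kernels; they are only declared here.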
constexpr int kConvMKLDNNFP32 = 1;
constexpr int kConvMKLDNNINT8 = 2;
constexpr int kConvMKLDNNINT8WS8 = 3;
constexpr int MaxKeyLength = 256;

// Base convolution operator definitions for other conv-like
// operators to reuse the implementation.
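// Computes the spatial output size along one dimension:
//   output = (input + 2 * padding - (dilation * (filter - 1) + 1)) / stride + 1
// For example, input_size = 32, filter_size = 3, dilation = 1, padding = 1
// and stride = 1 give a dilated kernel of 3 and an output of
// (32 + 2 - 3) / 1 + 1 = 32.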
inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
  PADDLE_ENFORCE_GT(
      output_size, 0,
      platform::errors::InvalidArgument(
          "The output's size is expected to be greater than 0. "
          "But received: output's size is %d. The output's size is computed by "
          "((input_size + 2 * padding - (dilation * (filter_size - 1) + 1)) / "
          "stride + 1), where input_size is %d, padding is %d, "
          "filter_size is %d, dilation is %d, stride is %d.",
          output_size, input_size, padding, filter_size, dilation, stride));

  return output_size;
}

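// Overload for asymmetric padding: padding_1 and padding_2 are the paddings
// at the two ends of the spatial dimension.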
inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding_1, int padding_2, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + padding_1 + padding_2 - dkernel) / stride + 1;
  PADDLE_ENFORCE_GT(
      output_size, 0,
      platform::errors::InvalidArgument(
          "The output's size is expected to be greater than 0. "
          "But received: output's size is %d. The output's size is computed by "
          "((input_size + padding_1 + padding_2 - (dilation * (filter_size - "
          "1) + 1)) / stride + 1), where input_size is %d, padding is "
          "(%d, %d), filter_size is %d, dilation is %d, stride is %d.",
          output_size, input_size, padding_1, padding_2, filter_size, dilation,
          stride));

  return output_size;
}

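// Normalizes `paddings` to one (begin, end) pair per spatial dimension and
// rewrites paddings/dilations according to `padding_algorithm`:
//   "SAME"  - pad so that output_size = ceil(input_size / stride); dilations
//             are reset to 1.
//   "VALID" - no padding at all.
// Any other value keeps the explicitly specified paddings.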
template <typename T = int>
inline void UpdatePaddingAndDilation(std::vector<T>* paddings,
                                     std::vector<T>* dilation,
                                     const std::string padding_algorithm,
                                     const framework::DDim data_dims,
                                     const std::vector<T>& strides,
                                     const std::vector<T>& ksize) {
  // Ensure paddings holds data_dims.size() * 2 entries (a begin/end pair
  // per spatial dimension).
  auto data_shape = phi::vectorize<T>(data_dims);
  if (static_cast<int>(paddings->size()) == data_dims.size()) {
    for (int i = 0; i < data_dims.size(); ++i) {
      T copy_pad = *(paddings->begin() + 2 * i);
      paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
    }
  } else {
    PADDLE_ENFORCE_EQ(
        data_dims.size() * 2, paddings->size(),
        platform::errors::InvalidArgument(
            "Attribute padding's size should be the same or twice as the "
            "input's dimension. "
            "But received: padding's size is %d, padding is [%s]; input's "
            "dimension is %d, input's shape is [%s].",
            paddings->size(), phi::make_ddim(*paddings), data_dims.size(),
            data_dims));
  }

  // when padding_algorithm is "VALID" or "SAME"
  if (padding_algorithm == "SAME") {
    for (int i = 0; i < data_dims.size(); ++i) {
      T out_size = (data_dims[i] + strides[i] - 1) / strides[i];
      T pad_sum =
          std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i],
                   static_cast<T>(0));
      T pad_0 = pad_sum / 2;
      T pad_1 = pad_sum - pad_0;
      *(paddings->begin() + i * 2) = pad_0;
      *(paddings->begin() + i * 2 + 1) = pad_1;

      // "SAME" padding forces the dilation to 1.
      *(dilation->begin() + i) = 1;
    }

  } else if (padding_algorithm == "VALID") {
    for (auto it = paddings->begin(); it != paddings->end(); it++) {
      *it = 0;
    }
  }
}
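// Example under "SAME": input_size = 5, stride = 2, ksize = 3 give
// out_size = ceil(5 / 2) = 3 and pad_sum = max((3 - 1) * 2 + 3 - 5, 0) = 2,
// split into the pair (1, 1).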

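// Returns true when the im2col/vol2col expansion step is needed, i.e. unless
// the convolution is the trivial 1x1, stride-1, padding-0, dilation-1 case
// that can be lowered directly to a GEMM.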
inline bool IsExpand(const std::vector<int64_t>& filter_dim,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings,
                     const std::vector<int>& dilations) {
  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
  for (size_t j = 0; j < strides.size(); ++j) {
    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
    strides_1 = strides_1 && (strides[j] == 1);
    padding_0 = padding_0 && (paddings[j] == 0);
    dilation_1 = dilation_1 && (dilations[j] == 1);
  }
  if (paddings.size() != strides.size()) {
    for (size_t j = 0; j < paddings.size(); ++j) {
      padding_0 = padding_0 && (paddings[j] == 0);
    }
  }
  return !(filter_1 && strides_1 && padding_0 && dilation_1);
}

// Define Op classes in .h file so that other conv
// operator implementations can reuse the code.
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final;

 protected:
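  // Derived makers can append extra inputs/attrs by overriding Apply(),
  // which Make() is expected to invoke after declaring the common conv proto.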
  virtual void Apply() {}
};

class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final;

 protected:
  virtual void Apply() {}
};

class ConvOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
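  // "Output" inherits its variable type and dtype from "Input".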
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{
        {"Input", /*->*/ "Output"}};
    return m;
  }
};

class ConvOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    std::vector<int64_t> output_shape = ComputeOutputShape(ctx);

    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "Conv");
    ctx->SetOutputDim("Output", phi::make_ddim(output_shape));
    ctx->ShareLoD("Input", "Output");
  }

 protected:
  std::vector<int64_t> ComputeOutputShape(
      framework::InferShapeContext* ctx) const;

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

class ConvOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

class ConvOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};

}  // namespace operators
}  // namespace paddle