/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/layout_utils.h"
#include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
constexpr int kConvMKLDNNFP32 = 1;
constexpr int kConvMKLDNNINT8 = 2;
constexpr int MaxKeyLength = 256;

// Base convolution operator definitions for other conv-like
// operators to reuse the implementation.
inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
  PADDLE_ENFORCE_GT(
      output_size, 0,
      platform::errors::InvalidArgument(
          "The output's size is expected to be greater than 0. "
          "But received: output's size is %d. The output's size is computed by "
          "((input_size + 2 * padding - (dilation * (filter_size - 1) + 1)) / "
          "stride + 1), where input_size is %d, padding is %d, "
          "filter_size is %d, dilation is %d, stride is %d.",
          output_size, input_size, padding, filter_size, dilation, stride));

  return output_size;
}
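
// Worked example (illustrative): with input_size = 32, filter_size = 3,
// dilation = 1, padding = 1 and stride = 1,
//   dkernel     = 1 * (3 - 1) + 1 = 3
//   output_size = (32 + 2 * 1 - 3) / 1 + 1 = 32
// i.e. the spatial size is preserved ("same" convolution).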

inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding_1, int padding_2, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + padding_1 + padding_2 - dkernel) / stride + 1;
  PADDLE_ENFORCE_GT(
      output_size, 0,
      platform::errors::InvalidArgument(
          "The output's size is expected to be greater than 0. "
          "But received: output's size is %d. The output's size is computed by "
          "((input_size + padding_1 + padding_2 - (dilation * (filter_size - "
          "1) + 1)) / stride + 1), where input_size is %d, padding is "
          "(%d, %d), filter_size is %d, dilation is %d, stride is %d.",
          output_size, input_size, padding_1, padding_2, filter_size, dilation,
          stride));

  return output_size;
}
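
// Worked example (illustrative): the asymmetric-padding overload with
// input_size = 32, filter_size = 3, dilation = 1, padding_1 = 0,
// padding_2 = 1 and stride = 2 gives
//   output_size = (32 + 0 + 1 - 3) / 2 + 1 = 16.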

template <typename T = int>
inline void UpdatePaddingAndDilation(std::vector<T>* paddings,
                                     std::vector<T>* dilation,
                                     const std::string& padding_algorithm,
                                     const framework::DDim& data_dims,
                                     const std::vector<T>& strides,
                                     const std::vector<T>& ksize) {
  // Expand paddings so that paddings->size() == data_dims.size() * 2.
  auto data_shape = phi::vectorize<T>(data_dims);
  if (static_cast<int>(paddings->size()) == data_dims.size()) {
    for (int i = 0; i < data_dims.size(); ++i) {
      T copy_pad = *(paddings->begin() + 2 * i);
      paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
    }
  } else {
    PADDLE_ENFORCE_EQ(
        data_dims.size() * 2, paddings->size(),
        platform::errors::InvalidArgument(
            "Attribute padding's size should be the same or twice as the "
            "input's dimension. "
            "But received: padding's size is %d, padding is [%s]; input's "
            "dimension is %d, input's shape is [%s].",
            paddings->size(), phi::make_ddim(*paddings), data_dims.size(),
            data_dims));
  }

  // when padding_algorithm is "VALID" or "SAME"
  if (padding_algorithm == "SAME") {
    for (int i = 0; i < data_dims.size(); ++i) {
      T out_size = (data_dims[i] + strides[i] - 1) / strides[i];
      T pad_sum =
          std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i],
                   static_cast<T>(0));
      T pad_0 = pad_sum / 2;
      T pad_1 = pad_sum - pad_0;
      *(paddings->begin() + i * 2) = pad_0;
      *(paddings->begin() + i * 2 + 1) = pad_1;

      // With "SAME" padding, dilation is forced to 1.
      *(dilation->begin() + i) = 1;
    }

  } else if (padding_algorithm == "VALID") {
    for (auto it = paddings->begin(); it != paddings->end(); it++) {
      *it = 0;
    }
  }
}
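
// Worked example (illustrative): for one spatial dimension with
// data_dims[i] = 32, strides[i] = 2, ksize[i] = 3 and
// padding_algorithm == "SAME":
//   out_size = (32 + 2 - 1) / 2 = 16
//   pad_sum  = max((16 - 1) * 2 + 3 - 32, 0) = 1
//   pad_0 = 0, pad_1 = 1   (the extra padding goes on the trailing side)
// and the dilation for that dimension is reset to 1.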

inline bool IsExpand(const std::vector<int64_t>& filter_dim,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings,
                     const std::vector<int>& dilations) {
  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
  for (size_t j = 0; j < strides.size(); ++j) {
    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
    strides_1 = strides_1 && (strides[j] == 1);
    padding_0 = padding_0 && (paddings[j] == 0);
    dilation_1 = dilation_1 && (dilations[j] == 1);
  }
  if (paddings.size() != strides.size()) {
    for (size_t j = 0; j < paddings.size(); ++j) {
      padding_0 = padding_0 && (paddings[j] == 0);
    }
  }
  return !(filter_1 && strides_1 && padding_0 && dilation_1);
}
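
// A false result means every filter spatial dim is 1 with unit stride, zero
// padding and unit dilation; in that degenerate case the input is already in
// column form, so a conv kernel can multiply directly and skip the
// im2col/vol2col expansion. Any other configuration requires the expansion.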

// Define Op classes in the .h file so that other conv-like
// operator implementations can reuse the code.
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final;

 protected:
  virtual void Apply() {}
};
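
// Make() is final; subclasses hook in via Apply() to append extra,
// operator-specific attributes. A minimal sketch (the maker name and
// attribute below are hypothetical, not part of this header):
//
//   class MyConv2DOpMaker : public Conv2DOpMaker {
//    protected:
//     void Apply() override {
//       AddAttr<bool>("my_flag", "A hypothetical extra attribute.")
//           .SetDefault(false);
//     }
//   };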

class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final;

 protected:
  virtual void Apply() {}
};

class ConvOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{
        {"Input", /*->*/ "Output"}};
    return m;
  }
};

class ConvOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    std::vector<int64_t> output_shape = ComputeOutputShape(ctx);

    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "Conv");
    ctx->SetOutputDim("Output", phi::make_ddim(output_shape));
    ctx->ShareLoD("Input", "Output");
  }

 protected:
  std::vector<int64_t> ComputeOutputShape(
      framework::InferShapeContext* ctx) const;

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

class ConvOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

class ConvOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};

}  // namespace operators
}  // namespace paddle