/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/layout_utils.h"
#include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
constexpr int kConvMKLDNNFP32 = 1;
constexpr int kConvMKLDNNINT8 = 2;
constexpr int MaxKeyLength = 256;

// Base convolution operator definitions for other conv
// like operators to reuse the implementation.
inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
  PADDLE_ENFORCE_GT(
      output_size, 0,
      platform::errors::InvalidArgument(
          "The output's size is expected to be greater than 0. "
          "But recieved: output's size is %d. The output's size is computed by "
          "((input_size + 2 * padding - (dilation * (filter_size - 1) + 1)) / "
          "stride + 1), where input_size is %d, padding is %d, "
          "filter_size is %d, dilation is %d, stride is %d.",
          output_size, input_size, padding, filter_size, dilation, stride));

  return output_size;
}
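// For example, input_size = 32, filter_size = 3, dilation = 1, padding = 1
// and stride = 1 give dkernel = 3 and
// output_size = (32 + 2 * 1 - 3) / 1 + 1 = 32, i.e. a "same"-sized output.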

inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding_1, int padding_2, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + padding_1 + padding_2 - dkernel) / stride + 1;
  PADDLE_ENFORCE_GT(
      output_size, 0,
      platform::errors::InvalidArgument(
          "The output's size is expected to be greater than 0. "
          "But recieved: output's size is %d. The output's size is computed by "
          "((input_size + padding_1 + padding_2 - (dilation * (filter_size - "
          "1) + 1)) / stride + 1), where input_size is %d, padding is "
          "(%d, %d), filter_size is %d, dilation is %d, stride is %d.",
          output_size, input_size, padding_1, padding_2, filter_size, dilation,
          stride));

  return output_size;
}
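// For example, the asymmetric case input_size = 32, filter_size = 3,
// dilation = 1, padding_1 = 0, padding_2 = 1 and stride = 2 gives
// output_size = (32 + 0 + 1 - 3) / 2 + 1 = 16.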

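// Normalizes `paddings` to one (begin, end) pair per spatial dimension and
// applies the padding algorithm: "SAME" pads so that the output size is
// ceil(input / stride) and forces dilation to 1, while "VALID" zeroes all
// paddings. A size-N `paddings` (one value per dimension) is first expanded
// to size 2N by duplicating each entry.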
template <typename T = int>
inline void UpdatePaddingAndDilation(std::vector<T>* paddings,
                                     std::vector<T>* dilation,
                                     const std::string padding_algorithm,
                                     const framework::DDim data_dims,
                                     const std::vector<T>& strides,
                                     const std::vector<T>& ksize) {
  // set padding size == data_dims.size() * 2
  auto data_shape = phi::vectorize<T>(data_dims);
  if (static_cast<int>(paddings->size()) == data_dims.size()) {
    for (int i = 0; i < data_dims.size(); ++i) {
      T copy_pad = *(paddings->begin() + 2 * i);
      paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
    }
  } else {
    PADDLE_ENFORCE_EQ(
        data_dims.size() * 2, paddings->size(),
        platform::errors::InvalidArgument(
            "Attribute padding's size should be the same or twice as the "
            "input's dimension. "
            "But recieved: padding's size is %d, padding is [%s]; input's "
            "dimension is %d, input's shape is [%s].",
            paddings->size(), phi::make_ddim(*paddings), data_dims.size(),
            data_dims));
  }

  // when padding_algorithm is "VALID" or "SAME"
  if (padding_algorithm == "SAME") {
    for (int i = 0; i < data_dims.size(); ++i) {
      T out_size = (data_dims[i] + strides[i] - 1) / strides[i];
      T pad_sum =
          std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i],
                   static_cast<T>(0));
      T pad_0 = pad_sum / 2;
      T pad_1 = pad_sum - pad_0;
      *(paddings->begin() + i * 2) = pad_0;
      *(paddings->begin() + i * 2 + 1) = pad_1;

      // dilation is fixed to 1 under "SAME" padding
      *(dilation->begin() + i) = 1;
    }

  } else if (padding_algorithm == "VALID") {
    for (auto it = paddings->begin(); it != paddings->end(); it++) {
      *it = 0;
    }
  }
}
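// A worked "SAME" example: data_dims = {5, 5}, strides = {2, 2} and
// ksize = {3, 3} give out_size = 3 per dimension and
// pad_sum = (3 - 1) * 2 + 3 - 5 = 2, so `paddings` becomes {1, 1, 1, 1} and
// `dilation` becomes {1, 1}.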

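// True when the im2col/vol2col expansion is actually needed, i.e. unless the
// filter is 1x1 with unit stride, zero padding and unit dilation. For
// example, filter_dim = {64, 3, 1, 1} (the trailing entries are the spatial
// filter sizes) with strides {1, 1}, paddings {0, 0} and dilations {1, 1}
// returns false.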
inline bool IsExpand(const std::vector<int64_t>& filter_dim,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings,
                     const std::vector<int>& dilations) {
  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
  for (size_t j = 0; j < strides.size(); ++j) {
    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
    strides_1 = strides_1 && (strides[j] == 1);
    padding_0 = padding_0 && (paddings[j] == 0);
    dilation_1 = dilation_1 && (dilations[j] == 1);
  }
  if (paddings.size() != strides.size()) {
    for (size_t j = 0; j < paddings.size(); ++j) {
      padding_0 = padding_0 && (paddings[j] == 0);
    }
  }
  return !(filter_1 && strides_1 && padding_0 && dilation_1);
}

// Define Op classes in .h file so that other conv
// operator implementations can reuse the code.
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final;

 protected:
  virtual void Apply() {}
};
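// A minimal sketch of how a conv-like operator can reuse Conv2DOpMaker:
// Make() registers the shared conv proto and then calls the Apply() hook, so
// a subclass only adds its op-specific attributes. The class name and
// attribute below are illustrative, not part of this header.
//
//   class MyFusedConvOpMaker : public Conv2DOpMaker {
//    protected:
//     void Apply() override {
//       AddAttr<std::string>("activation",
//                            "Activation type fused after the convolution.")
//           .SetDefault("");
//     }
//   };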

class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final;

 protected:
  virtual void Apply() {}
};

class ConvOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{
        {"Input", /*->*/ "Output"}};
    return m;
  }
};

class ConvOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    std::vector<int64_t> output_shape = ComputeOutputShape(ctx);

    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "Conv");
    ctx->SetOutputDim("Output", phi::make_ddim(output_shape));
    ctx->ShareLoD("Input", "Output");
  }

 protected:
  std::vector<int64_t> ComputeOutputShape(
      framework::InferShapeContext* ctx) const;

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

class ConvOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

class ConvOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};

}  // namespace operators
}  // namespace paddle