// affine_channel_op.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include <unordered_map>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

class AffineChannelOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "(Tensor) Feature map input can be a 4D tensor with order NCHW "
             "or NHWC. It also can be a 2D tensor and C is the second "
             "dimension.");
    AddInput("Scale",
             "(Tensor) 1D input of shape (C), the c-th element "
             "is the scale factor of the affine transformation "
             "for the c-th channel of the input.");
    AddInput("Bias",
             "(Tensor) 1D input of shape (C), the c-th element "
             "is the bias of the affine transformation for the "
             "c-th channel of the input.");
    AddAttr<std::string>(
        "data_layout",
        "(string, default NCHW) Only used in "
        "An optional string from: \"NHWC\", \"NCHW\". "
        "Defaults to \"NHWC\". Specify the data format of the output data, "
        "the input will be transformed automatically. ")
        .SetDefault("AnyLayout");
    AddOutput("Out", "(Tensor) A tensor of the same shape and order with X.");
    AddComment(R"DOC(

Applies a separate affine transformation to each channel of the input. Useful
for replacing spatial batch norm with its equivalent fixed transformation.
The input can also be a 2D tensor, in which case the affine transformation is
applied along its second dimension.

$$Out = Scale*X + Bias$$
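
For example, with a 4D input X of shape [2, 3, 4, 4] and data_layout "NCHW",
Scale and Bias must both have shape [3], and
Out[n, c, h, w] = Scale[c] * X[n, c, h, w] + Bias[c].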

)DOC");
  }
};

class AffineChannelOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "AffineChannel");
    OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "AffineChannel");
    OP_INOUT_CHECK(ctx->HasInput("Bias"), "Input", "Bias", "AffineChannel");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "AffineChannel");

    auto x_dims = ctx->GetInputDim("X");
    auto scale_dims = ctx->GetInputDim("Scale");
    auto b_dims = ctx->GetInputDim("Bias");
    const phi::DataLayout data_layout =
        phi::StringToDataLayout(ctx->Attrs().Get<std::string>("data_layout"));

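    // C is dims[1] for NCHW and the last dimension otherwise (NHWC, or the 2D
    // case where C is both the second and the last dimension).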
    const int64_t C =
        (data_layout == phi::DataLayout::kNCHW ? x_dims[1]
                                               : x_dims[x_dims.size() - 1]);

    PADDLE_ENFORCE_EQ(
        scale_dims.size(),
        1UL,
        platform::errors::InvalidArgument(
            "The number of dimensions of Input(Scale) must be 1, "
            "but received [%d].",
            scale_dims.size()));
    PADDLE_ENFORCE_EQ(b_dims.size(),
                      1UL,
                      platform::errors::InvalidArgument(
                          "The number of dimensions of Input(Bias) must be 1, "
                          "but received [%d].",
                          b_dims.size()));
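    // Skip the channel-size checks at compile time while the dimension is
    // still unknown (-1).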
    if (ctx->IsRuntime() || scale_dims[0] > 0) {
      PADDLE_ENFORCE_EQ(
          scale_dims[0],
          C,
          platform::errors::InvalidArgument(
              "The first dimension of Input(Scale) must be [%d], "
              "but received [%d].",
              C,
              scale_dims[0]));
    }
    if (ctx->IsRuntime() || b_dims[0] > 0) {
      PADDLE_ENFORCE_EQ(
          b_dims[0],
          C,
          platform::errors::InvalidArgument(
              "The first dimension of Input(Bias) must be [%d], "
              "but received [%d].",
              C,
              b_dims[0]));
    }

    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
    ctx->ShareLoD("X", "Out");
  }
};

class AffineChannelOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   framework::GradVarName("Out"),
                   "AffineChannelGrad");
    if (ctx->HasOutput(framework::GradVarName("X"))) {
      OP_INOUT_CHECK(
          ctx->HasInput("Scale"), "Input", "Scale", "AffineChannelGrad");
      ctx->SetOutputDim(framework::GradVarName("X"),
                        ctx->GetInputDim(framework::GradVarName("Out")));
    }
    if (ctx->HasOutput(framework::GradVarName("Scale"))) {
      // Scale@GRAD and Bias@GRAD must exist at the same time.
      OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("Bias")),
                     "Output",
                     framework::GradVarName("Bias"),
                     "AffineChannelGrad");
      OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "AffineChannelGrad");
      ctx->SetOutputDim(framework::GradVarName("Scale"),
                        ctx->GetInputDim("Scale"));
      ctx->SetOutputDim(framework::GradVarName("Bias"),
                        ctx->GetInputDim("Scale"));
    }
  }

 protected:
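  // X may be an unallocated no-need-buffer input here (see
  // AffineChannelNoNeedBufferVarsInference below), so the kernel data type is
  // inferred from Out@GRAD instead.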
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(
                              ctx, framework::GradVarName("Out")),
                          ctx.GetPlace());
  }
};

template <typename T>
class AffineChannelGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> op) const override {
    op->SetType("affine_channel_grad");
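    // The backward pass reads X (for Scale@GRAD), Out@GRAD, and Scale (for
    // X@GRAD); Bias itself is not needed to compute any gradient.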
    op->SetInput("X", this->Input("X"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetInput("Scale", this->Input("Scale"));

    op->SetAttrMap(this->Attrs());

    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetOutput(framework::GradVarName("Scale"), this->InputGrad("Scale"));
    op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
  }
};

template <typename T>
using EigenArrayMap =
    Eigen::Map<Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>>;
template <typename T>
using ConstEigenArrayMap =
    Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>>;
template <typename T>
using EigenVectorArrayMap = Eigen::Map<Eigen::Array<T, Eigen::Dynamic, 1>>;
template <typename T>
using ConstEigenVectorArrayMap =
    Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, 1>>;

template <typename T, typename DeviceContext>
class AffineChannelKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<phi::DenseTensor>("X");
    auto* scale = ctx.Input<phi::DenseTensor>("Scale");
    auto* bias = ctx.Input<phi::DenseTensor>("Bias");

    auto* y = ctx.Output<phi::DenseTensor>("Out");
    y->mutable_data<T>(ctx.GetPlace());

    const phi::DataLayout layout =
        phi::StringToDataLayout(ctx.Attr<std::string>("data_layout"));

    auto dims = x->dims();
    int N = dims[0];
    int C = layout == phi::DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1];
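    // Number of spatial positions per sample and channel (1 for a 2D input).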
    int HxW = x->numel() / N / C;

    auto* scale_d = scale->data<T>();
    auto* bias_d = bias->data<T>();
    ConstEigenVectorArrayMap<T> a_e(scale_d, C);
    ConstEigenVectorArrayMap<T> b_e(bias_d, C);

    auto* x_d = x->data<T>();
    auto* y_d = y->data<T>();
    if (layout == phi::DataLayout::kNCHW) {
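      // Each NCHW sample is mapped as an (HxW) x C column-major array, so
      // column c holds channel c; the rowwise operations then scale and shift
      // every channel column by its per-channel factor.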
      int stride = C * HxW;
      for (int i = 0; i < N; i++) {
        ConstEigenArrayMap<T> x_e(x_d, HxW, C);
        EigenArrayMap<T> y_e(y_d, HxW, C);
        y_e = (x_e.rowwise() * a_e.transpose()).rowwise() + b_e.transpose();
        x_d += stride;
        y_d += stride;
      }
    } else {
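      // For NHWC (and the 2D case) the C channel values of each position are
      // contiguous, so the whole batch maps as a C x (N*HxW) array and the
      // transform becomes a column-wise scale and shift.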
      int num = N * HxW;
      ConstEigenArrayMap<T> x_e(x_d, C, num);
      EigenArrayMap<T> y_e(y_d, C, num);
      y_e = (x_e.colwise() * a_e).colwise() + b_e;
    }
  }
};

template <typename T, typename DeviceContext>
class AffineChannelGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<phi::DenseTensor>("X");
    auto* scale = ctx.Input<phi::DenseTensor>("Scale");
    auto* dy = ctx.Input<phi::DenseTensor>(framework::GradVarName("Out"));

    auto* dx = ctx.Output<phi::DenseTensor>(framework::GradVarName("X"));
    auto* dscale =
        ctx.Output<phi::DenseTensor>(framework::GradVarName("Scale"));
    auto* dbias = ctx.Output<phi::DenseTensor>(framework::GradVarName("Bias"));

    const phi::DataLayout layout =
        phi::StringToDataLayout(ctx.Attr<std::string>("data_layout"));

    auto dims = x->dims();
    int N = dims[0];
    int C = layout == phi::DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1];
    int HxW = x->numel() / N / C;

    auto* dy_d = dy->data<T>();
    auto* scale_d = scale->data<T>();
    ConstEigenVectorArrayMap<T> scale_e(scale_d, C);

    T* dx_d = dx ? dx->mutable_data<T>(ctx.GetPlace()) : nullptr;
    T* dscale_d = dscale ? dscale->mutable_data<T>(ctx.GetPlace()) : nullptr;
    T* dbias_d = dbias ? dbias->mutable_data<T>(ctx.GetPlace()) : nullptr;
    EigenVectorArrayMap<T> dscale_e(dscale_d, C);
    EigenVectorArrayMap<T> dbias_e(dbias_d, C);

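    // Gradients of Out = Scale * X + Bias, reduced per channel c:
    //   Scale@GRAD[c] = sum over (n, h, w) of X * Out@GRAD
    //   Bias@GRAD[c]  = sum over (n, h, w) of Out@GRAD
    //   X@GRAD        = Out@GRAD * Scale (broadcast along the channel axis)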
    if (layout == phi::DataLayout::kNCHW) {
      // compute dscale and dbias
      int stride = C * HxW;
      auto* original_dy_d = dy_d;
      if (dscale && dbias) {
        auto* x_d = x->data<T>();
        for (int i = 0; i < N; i++) {
          ConstEigenArrayMap<T> x_e(x_d, HxW, C);
          ConstEigenArrayMap<T> dy_e(dy_d, HxW, C);
          if (i == 0) {
            dscale_e = (x_e * dy_e).colwise().sum();
          } else {
            dscale_e += (x_e * dy_e).colwise().sum();
          }
          if (i == 0) {
            dbias_e = dy_e.colwise().sum();
          } else {
            dbias_e += dy_e.colwise().sum();
          }
          x_d += stride;
          dy_d += stride;
        }
      }

      // compute dx
      if (dx) {
288 289 290 291 292 293 294 295
        dy_d = original_dy_d;
        for (int i = 0; i < N; i++) {
          ConstEigenArrayMap<T> dy_e(dy_d, HxW, C);
          EigenArrayMap<T> dx_e(dx_d, HxW, C);
          dx_e = dy_e.rowwise() * scale_e.transpose();
          dy_d += stride;
          dx_d += stride;
        }
      }
    } else {
      int num = N * HxW;
      ConstEigenArrayMap<T> dy_e(dy_d, C, num);
      // compute dscale and dbias
      if (dscale && dbias) {
        auto* x_d = x->data<T>();
        ConstEigenArrayMap<T> x_e(x_d, C, num);
        dscale_e = (x_e * dy_e).rowwise().sum();
        dbias_e = dy_e.rowwise().sum();
      }

      // compute dx
      if (dx) {
        EigenArrayMap<T> dx_e(dx_d, C, num);
        dx_e = dy_e.colwise() * scale_e;
      }
    }
  }
};

class AffineChannelNoNeedBufferVarsInference
    : public framework::NoNeedBufferVarsInference {
 public:
  using framework::NoNeedBufferVarsInference::NoNeedBufferVarsInference;

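  // X's data is only read when computing Scale@GRAD; if neither Scale@GRAD nor
  // Bias@GRAD is requested, the backward pass needs only Out@GRAD and Scale,
  // so X can be marked as a no-need-buffer variable to save memory.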
  const std::unordered_set<std::string>& operator()(
      const framework::InferNoNeedBufferVarsContext& ctx) const final {
    static const std::unordered_set<std::string> kX({"X"});
    if (!ctx.HasOutput(framework::GradVarName("Scale")) &&
        !ctx.HasOutput(framework::GradVarName("Bias"))) {
      return kX;
    } else {
      return Empty();
    }
  }
};

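// Out may reuse X's buffer in the forward op, and X@GRAD may reuse Out@GRAD's
// buffer in the backward op.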
DECLARE_INPLACE_OP_INFERER(AffineChannelInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(AffineChannelGradInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
using CPU = phi::CPUContext;

REGISTER_OPERATOR(affine_channel,
                  ops::AffineChannelOp,
                  ops::AffineChannelOpMaker,
                  ops::AffineChannelGradMaker<paddle::framework::OpDesc>,
                  ops::AffineChannelGradMaker<paddle::imperative::OpBase>,
                  ops::AffineChannelInplaceInferer);
REGISTER_OPERATOR(affine_channel_grad,
                  ops::AffineChannelOpGrad,
                  ops::AffineChannelNoNeedBufferVarsInference,
                  ops::AffineChannelGradInplaceInferer);

PD_REGISTER_STRUCT_KERNEL(
    affine_channel, CPU, ALL_LAYOUT, ops::AffineChannelKernel, float, double) {}

PD_REGISTER_STRUCT_KERNEL(affine_channel_grad,
                          CPU,
                          ALL_LAYOUT,
                          ops::AffineChannelGradKernel,
                          float,
                          double) {}