/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
   http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#include "paddle/fluid/operators/temporal_shift_op.h"
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

using framework::Tensor;

class TemporalShiftOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
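    // Input(X) is expected to be a 4-D tensor of shape [N*T, C, H, W]. The
    // checks below validate the presence of X/Out, the rank of X, and the
    // seg_num / shift_ratio attributes.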
    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
                      "Input(X) of TemporalShiftOp should not be null.");
    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
                      "Output(Out) of TemporalShiftOp should not be null.");

    auto dim_x = ctx->GetInputDim("X");
    PADDLE_ENFORCE_EQ(dim_x.size(), 4,
                      "Input(X) rank should be 4 in shape of [N*T, C, H, W].");

    int seg_num = ctx->Attrs().Get<int>("seg_num");
    float shift_ratio = ctx->Attrs().Get<float>("shift_ratio");
    PADDLE_ENFORCE_GT(seg_num, 0, "Attr(seg_num) should be greater than 0.");
    PADDLE_ENFORCE_GT(shift_ratio, 0.,
                      "Attr(shift_ratio) should be greater than 0");
    PADDLE_ENFORCE_LT(shift_ratio, 0.5,
                      "Attr(shift_ratio) should be less than 0.5");

    if (ctx->IsRuntime()) {
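      // The concrete batch dimension may be unknown until runtime, so the
      // divisibility of dims[0] by seg_num is only enforced here.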
      PADDLE_ENFORCE_EQ(
          dim_x[0] % seg_num, 0,
          "Input(X) dims[0] should be divided exactly by Attr(seg_num).");
    }

    ctx->SetOutputDim("Out", dim_x);
    ctx->ShareLoD("X", "Out");
  }

 protected:
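  // The kernel's data type is deduced from the data type of Input(X); the
  // kernel runs on the place of the current execution context.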
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
  }
};

class TemporalShiftOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "The input tensor of temporal shift operator. "
             "This is a 4-D tensor with shape of [N*T,  C, H, W]. "
             "While N is the batch size, T is the temporal segment "
             "number, C is the channel number, H is the height of "
             "features and W is the width of features. "
             "The data type is float32 and float64");
    AddOutput("Out",
              "The output tensor of temporal shift operator. "
              "This is a 4-D tensor in the same shape with Input(X).");

    AddAttr<int>("seg_num",
                 "The temporal segment number, this should be a positive "
                 "integer.");
    AddAttr<float>(
        "shift_ratio",
        "The shift ratio of the channels, the first :attr:`shift_ratio` part "
        "of channels will be shifted by -1 along the temporal dimension, "
        "and the second :attr:`shift_ratio` part of channels will be shifted "
        "by 1 along the temporal dimension. :attr:`shift_ratio` should be in "
        "range [0, 0.5]. Default 0.25.")
        .SetDefault(0.25);

    AddComment(R"DOC(
          This operator calculates the temporally shifted features for Input(X).

          Input(X) should be in shape of [N*T, C, H, W], where N is the batch
          size, T is the temporal segment number specified by :attr:`seg_num`,
          C is the channel number, H and W are the height and width of features.

          Temporal Shifting is calculated as follows:
          
          Step 1: Reshape Input(X) to [N, T, C, H, W].

          Step 2: Pad the reshaped result with zeros along the 2nd (T)
          dimension, with padding width 1 on each side; the padded result is
          in shape of [N, T+2, C, H, W].

          Step 3: Assuming :attr:`shift_ratio` is :math:`1/4`, slice the padded
          result as follows:

          $$
          slice1 = x[:, :T, :C/4, :, :]
          $$
          $$
          slice2 = x[:, 2:T+2, C/4:C/2, :, :]
          $$
          $$
          slice3 = x[:, 1:T+1, C/2:, :, :]
          $$

          Step 4: Concatenate the three slices along the 3rd (C) dimension and
          reshape the result to [N*T, C, H, W].

          For details of temporal shifting, please refer to the paper:
          `Temporal Shift Module <http://arxiv.org/abs/1811.08383>`_ .

         )DOC");
  }
};
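
// A concrete reading of the slicing documented in TemporalShiftOpMaker above
// (an illustrative sketch, not the kernel itself; the kernels registered at
// the bottom of this file are declared in temporal_shift_op.h): with
// shift_ratio = 1/4, output frame t of the reshaped [N, T, C, H, W] tensor
// takes
//   - channels [0, C/4)   from input frame t - 1 (zeros when t - 1 < 0),
//   - channels [C/4, C/2) from input frame t + 1 (zeros when t + 1 > T - 1),
//   - channels [C/2, C)   from input frame t (unshifted).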

class TemporalShiftOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    if (ctx->HasOutput(framework::GradVarName("X"))) {
      ctx->SetOutputDim(framework::GradVarName("X"),
                        ctx->GetInputDim(framework::GradVarName("Out")));
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
                                       ctx, framework::GradVarName("Out")),
                                   ctx.GetPlace());
  }
};

class TemporalShiftGradOpDescMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

 protected:
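  // Generates the desc of the backward op: temporal_shift_grad takes Out@GRAD
  // as input, produces X@GRAD, and reuses the forward op's attributes
  // (seg_num, shift_ratio).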
  std::unique_ptr<framework::OpDesc> Apply() const override {
    std::unique_ptr<framework::OpDesc> op(new framework::OpDesc());
    op->SetType("temporal_shift_grad");
    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
    op->SetAttrMap(Attrs());
    return op;
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(temporal_shift, ops::TemporalShiftOp,
                  ops::TemporalShiftOpMaker, ops::TemporalShiftGradOpDescMaker);
REGISTER_OPERATOR(temporal_shift_grad, ops::TemporalShiftOpGrad);
REGISTER_OP_CPU_KERNEL(temporal_shift, ops::TemporalShiftKernel<float>,
                       ops::TemporalShiftKernel<double>);
REGISTER_OP_CPU_KERNEL(temporal_shift_grad, ops::TemporalShiftGradKernel<float>,
                       ops::TemporalShiftGradKernel<double>);