//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/set_value_op.h"
16

17
#include <string>
18 19

#include "paddle/fluid/framework/infershape_utils.h"
20
#include "paddle/fluid/framework/op_version_registry.h"
21 22 23
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace framework {
class InferShapeContext;
class OpDesc;
template <typename T>
class EmptyGradOpMaker;
}  // namespace framework
namespace imperative {
class OpBase;
}  // namespace imperative
}  // namespace paddle

namespace paddle {
namespace operators {

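// Forward operator of set_value. The kernel data type is taken from the
// "Input" variable; the index tensor lists (Starts/Ends/StepsTensorList) are
// matched on ALL_BACKEND so they are not transformed to the kernel's place.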
class SetValue : public framework::OperatorWithKernel {
 public:
  SetValue(const std::string &type,
           const framework::VariableNameMap &inputs,
           const framework::VariableNameMap &outputs,
           const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

 protected:
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "Input"),
                          ctx.GetPlace());
  }

  phi::KernelKey GetKernelTypeForVar(
      const std::string &var_name,
      const phi::DenseTensor &tensor,
      const phi::KernelKey &expected_kernel_type) const override {
    if (var_name == "StartsTensorList" || var_name == "EndsTensorList" ||
        var_name == "StepsTensorList") {
      return phi::KernelKey(phi::Backend::ALL_BACKEND,
                            expected_kernel_type.layout(),
                            expected_kernel_type.dtype());
    }
    return phi::KernelKey(
        tensor.place(), tensor.layout(), expected_kernel_type.dtype());
  }
};

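// Op proto: declares the inputs, outputs and attributes of set_value. Slice
// positions can come from the starts/ends/steps attributes or, with higher
// priority, from the corresponding *TensorList inputs.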
class SetValueMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    // Input
    AddInput("Input", "(phi::DenseTensor) Input tensor of set_value operator.");
    AddInput("ValueTensor",
             "(phi::DenseTensor) Value tensor of set_value operator.")
        .AsDispensable();
    AddInput("StartsTensorList",
             "(vector<phi::DenseTensor<int32>>, optional) If provided, "
             "set_value will "
             "use this. The shape of the tensor in vector must be [1]. "
             "It has higher priority compared with attr(starts).")
        .AsDuplicable()
        .AsDispensable();
    AddInput("EndsTensorList",
             "(vector<phi::DenseTensor<int32>>, optional) If provided, "
             "set_value will "
             "use this. The shape of the tensor in vector must be [1]. "
             "It has higher priority compared with attr(ends).")
        .AsDuplicable()
        .AsDispensable();

    AddInput("StepsTensorList",
             "(vector<phi::DenseTensor<int32>>, optional) If provided, "
             "set_value will "
             "use this. The shape of the tensor in vector must be [1]. "
             "It has higher priority compared with attr(steps).")
        .AsDuplicable()
        .AsDispensable();

    // Output
    AddOutput("Out",
              "(phi::DenseTensor) Output tensor of set_value operator. The "
              "output is the "
              "same phi::DenseTensor as input.");

    // Attr
    AddAttr<int>("dtype", "data type of input.")
        .InEnum({framework::proto::VarType::BOOL,
                 framework::proto::VarType::INT32,
                 framework::proto::VarType::INT64,
                 framework::proto::VarType::FP32,
                 framework::proto::VarType::FP64,
                 framework::proto::VarType::FP16,
                 framework::proto::VarType::COMPLEX64,
                 framework::proto::VarType::COMPLEX128})
        .SetDefault(framework::proto::VarType::FP32);
    AddAttr<std::vector<int64_t>>(
        "axes", "(list<int64_t>) Axes that `starts` and `ends` apply to.");
    AddAttr<std::vector<int64_t>>(
        "starts",
        "(list<int64_t>) Starting indices of corresponding axis in `axes`.")
        .SetDefault({});
    AddAttr<std::vector<int64_t>>(
        "ends",
        "(list<int64_t>) Ending indices of corresponding axis in `axes`.")
        .SetDefault({});
    AddAttr<std::vector<int64_t>>(
        "steps", "(list<int64_t>) Stride step from the start to the end.")
        .SetDefault({});
    AddAttr<std::vector<int64_t>>("decrease_axes",
                                  "(list<int64_t>) The axes to decrease.")
        .SetDefault({});
    AddAttr<std::vector<int64_t>>("none_axes",
                                  "(list<int64_t>) The axes with none index.")
        .SetDefault({});

    AddAttr<std::vector<paddle::experimental::Scalar>>("values", "values")
        .SetDefault({});

    AddAttr<std::vector<int64_t>>("shape", "(vector<int64_t>) Shape of values.")
        .SetDefault({});
    AddComment(R"DOC(SetValue operator.

Assignment to a phi::DenseTensor in static graph mode.
)DOC");
  }
};

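// Grad maker: when the forward op consumed a ValueTensor, the backward op is
// set_value_grad; otherwise the gradient of Out is handed to Input through a
// plain assign op.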
template <typename T>
class SetValueGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    if (this->HasInput("ValueTensor")) {
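      // The forward op used a ValueTensor, so set_value_grad produces
      // gradients for both Input and ValueTensor.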
      op->SetType("set_value_grad");

      op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
      op->SetInput("ValueTensor", this->Input("ValueTensor"));
      if (this->HasInput("StartsTensorList")) {
        op->SetInput("StartsTensorList", this->Input("StartsTensorList"));
      }
      if (this->HasInput("EndsTensorList")) {
        op->SetInput("EndsTensorList", this->Input("EndsTensorList"));
      }
      if (this->HasInput("StepsTensorList")) {
        op->SetInput("StepsTensorList", this->Input("StepsTensorList"));
      }

      op->SetAttrMap(this->Attrs());

      op->SetOutput(framework::GradVarName("ValueTensor"),
                    this->InputGrad("ValueTensor"));
      op->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));

    } else {
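      // Values were taken from attributes, so the gradient of Out is simply
      // forwarded to the gradient of Input.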
      op->SetType("assign");
      op->SetInput("X", this->OutputGrad("Out"));
      op->SetOutput("Out", this->InputGrad("Input"));
    }
  }
};

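// Backward operator of set_value: checks that the incoming output gradient
// has rank < 7 and shares the shape/LoD of ValueTensor with its gradient.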
class SetValueGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   framework::GradVarName("Out"),
                   "set_value_grad");

    auto in_dims = ctx->GetInputDim(framework::GradVarName("Out"));
    PADDLE_ENFORCE_LT(
        in_dims.size(),
        7,
        platform::errors::InvalidArgument(
            "The dimension of set_value_grad operator's input should be less "
            "than 7, but received dimension is %d.",
            in_dims.size()));

    if (ctx->HasOutput(framework::GradVarName("ValueTensor"))) {
      ctx->ShareDim("ValueTensor",
                    /*->*/ framework::GradVarName("ValueTensor"));
      ctx->ShareLoD("ValueTensor",
                    /*->*/ framework::GradVarName("ValueTensor"));
    }
  }

 protected:
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto in_tensor = ctx.Input<phi::DenseTensor>(framework::GradVarName("Out"));
    return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(
                              ctx, framework::GradVarName("Out")),
                          in_tensor->place());
  }

  phi::KernelKey GetKernelTypeForVar(
      const std::string &var_name,
      const phi::DenseTensor &tensor,
      const phi::KernelKey &expected_kernel_type) const override {
    if (var_name == "StartsTensorList" || var_name == "EndsTensorList" ||
        var_name == "StepsTensorList") {
      return phi::KernelKey(phi::Backend::ALL_BACKEND,
                            expected_kernel_type.layout(),
                            expected_kernel_type.dtype());
    }
    return phi::KernelKey(
        tensor.place(), tensor.layout(), expected_kernel_type.dtype());
  }
};

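// set_value can work in place: "Input" and "Out" may share the same buffer.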
DECLARE_INPLACE_OP_INFERER(SetValueOpInplaceInferer, {"Input", "Out"});

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

DECLARE_INFER_SHAPE_FUNCTOR(set_value,
                            SetValueInferShapeFunctor,
                            PD_INFER_META(phi::SetValueInferMeta));

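// Register the forward op with its grad makers, the in-place inferer and the
// InferMeta-based shape functor declared above; the backward op is registered
// separately.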
REGISTER_OPERATOR(set_value,
                  ops::SetValue,
                  ops::SetValueMaker,
                  ops::SetValueGradMaker<paddle::framework::OpDesc>,
                  ops::SetValueGradMaker<paddle::imperative::OpBase>,
                  ops::SetValueOpInplaceInferer,
                  SetValueInferShapeFunctor);

REGISTER_OPERATOR(set_value_grad, ops::SetValueGrad);

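// Version checkpoints documenting how the op signature evolved, so programs
// saved by older Paddle versions can still be loaded and upgraded.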
REGISTER_OP_VERSION(set_value)
    .AddCheckpoint(
        R"ROC(
Upgrade set_value, add 3 inputs [StartsTensorList, EndsTensorList, StepsTensorList] and 1 attribute [steps].
              )ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewInput("StartsTensorList",
                      "If provided, set_value will use this. The shape of the "
                      "tensor in vector must be [1]. It has higher priority "
                      "compared with attr(starts).")
            .NewInput("EndsTensorList",
                      "If provided, set_value will use this. The shape of the "
                      "tensor in vector must be [1]. It has higher priority "
                      "compared with attr(ends).")
            .NewInput("StepsTensorList",
                      "If provided, set_value will use this. The shape of the "
                      "tensor in vector must be [1]. It has higher priority "
                      "compared with attr(steps).")
            .ModifyAttr("starts",
                        "Starting indices of corresponding axis in `axes`.",
                        std::vector<int64_t>{})
            .ModifyAttr("ends",
                        "Ending indices of corresponding axis in `axes`.",
                        std::vector<int64_t>{})
            .NewAttr("steps",
                     "Stride step from the start to the end.",
                     std::vector<int64_t>{}))
    .AddCheckpoint(
        R"ROC(
Upgrade set_value, add 1 attribute [decrease_axes].
              )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "decrease_axes", "The axes to decrease.", std::vector<int64_t>{}))
    .AddCheckpoint(
        R"ROC(
Upgrade set_value, add 1 attribute [none_axes].
              )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "none_axes", "The axes with none index.", std::vector<int64_t>{}))
    .AddCheckpoint(
        R"ROC(Upgrade set_value to support generic Scalars as value and remove plain values, so as to support complex types.)ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("values",
                     "values",
                     std::vector<paddle::experimental::Scalar>())
            .DeleteAttr("bool_values", "remove plain attributes")
            .DeleteAttr("fp32_values", "remove plain attributes")
            .DeleteAttr("int32_values", "remove plain attributes")
            .DeleteAttr("int64_values", "remove plain attributes")
            .DeleteAttr("fp64_values", "remove plain attributes"));