/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/fill_constant_op.h"
#include <string>
#include "paddle/fluid/framework/op_version_registry.h"
namespace paddle {
namespace operators {

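// fill_constant creates a Tensor of a given shape filled with a constant
// value. The shape may come from the "shape" attribute or, with higher
// priority, from the ShapeTensor / ShapeTensorList inputs; the value may come
// from the "value" / "str_value" attributes or the ValueTensor input.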
class FillConstantOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "FillConstant");

    auto& shape = ctx->Attrs().Get<std::vector<int64_t>>("shape");
    if (!ctx->HasInput("ShapeTensor") && !ctx->HasInputs("ShapeTensorList")) {
      for (size_t i = 0; i < shape.size(); ++i) {
        PADDLE_ENFORCE_GE(
            shape[i], 0,
            platform::errors::InvalidArgument(
                "Each value of attribute 'shape' is expected to be no less "
                "than 0. But received: shape[%u] = %d; shape = [%s].",
                i, shape[i], framework::make_ddim(shape)));
      }
    }
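    // When the shape is given by ShapeTensor, its concrete values are unknown
    // at compile time, so only the output rank is derived from ShapeTensor's
    // dims and every dimension is marked as -1 (unknown).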
    if (shape.empty() && ctx->HasInput("ShapeTensor")) {
      auto shape_dims = ctx->GetInputDim("ShapeTensor");
      int num_ele = 1;
      for (int i = 0; i < shape_dims.size(); ++i) {
        num_ele *= shape_dims[i];
      }
      auto vec_dims = std::vector<int>(num_ele, -1);
      ctx->SetOutputDim("Out", framework::make_ddim(vec_dims));

      return;
    }
    ctx->SetOutputDim("Out", framework::make_ddim(shape));
  }

 protected:
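  // For the shape-carrying inputs (ShapeTensor / ShapeTensorList) the expected
  // kernel type is returned unchanged, so no data transform is triggered for
  // them; all other variables keep their own place and layout.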
  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const framework::Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
    if (var_name == "ShapeTensor" || var_name == "ShapeTensorList") {
      return expected_kernel_type;
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    framework::OpKernelType kt = framework::OpKernelType(
        framework::proto::VarType::Type(ctx.Attr<int>("dtype")),
        ctx.GetPlace());
    // TODO(zyfncg): The force_cpu and place_type attributes conflict; this is
    // an issue left over from earlier code, and they may be merged in the
    // future. To invoke the new fill_constant kernel, the place of the
    // OpKernelType is set here according to force_cpu and place_type.
    if (ctx.Attr<bool>("force_cpu")) {
      kt.place_ = platform::CPUPlace();
    }
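    // Map the place_type attribute onto a concrete place; see the attribute
    // description in FillConstantOpMaker for the meaning of each value.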
    auto place_type = ctx.Attr<int>("place_type");
    if (place_type != -1) {
      switch (place_type) {
        case 0:
          kt.place_ = platform::CPUPlace();
          break;
        case 1:
        case 2:
          kt.place_ = platform::CUDAPlace();
          break;
        case 3:
          kt.place_ = platform::XPUPlace();
          break;
        case 4:
          kt.place_ = platform::NPUPlace();
          break;
        default:
          PADDLE_THROW(platform::errors::Unimplemented(
              "Could NOT determine the place of the variable, place_type = %d.",
              place_type));
      }
    }

    return kt;
  }

  framework::KernelSignature GetExpectedPtenKernelArgs(
      const framework::ExecutionContext& ctx) const override {
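    // The pten "fill_constant.scalar" kernel is only selected when the shape
    // and value come from attributes and Out is a dense tensor; otherwise a
    // signature that matches no pten kernel is returned, so execution falls
    // back to the original fill_constant kernel.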
    if (!ctx.HasInput("ShapeTensor") &&
        ctx.MultiInput<framework::Tensor>("ShapeTensorList").empty() &&
        !ctx.HasInput("ValueTensor") &&
        !ctx.OutputVar("Out")->IsType<framework::SelectedRows>()) {
      const auto& str_value = ctx.Attr<std::string>("str_value");
      std::string value = str_value.empty() ? "value" : "str_value";
      return framework::KernelSignature("fill_constant.scalar", {}, {value},
                                        {"Out"});
    }
    return framework::KernelSignature("fill_constant.unregistered", {}, {}, {});
  }
};

class FillConstantOpVarTypeInference : public framework::VarTypeInference {
 public:
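  // Propagate the "dtype" attribute to the data type of the output variable,
  // so that later type inference sees the correct element type for Out.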
  void operator()(framework::InferVarTypeContext* ctx) const override {
    auto data_type = static_cast<framework::proto::VarType::Type>(
        BOOST_GET_CONST(int, ctx->GetAttr("dtype")));
    ctx->SetOutputDataType("Out", data_type);
  }
};

class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddAttr<int>("dtype",
                 "(int, default 5 (FP32)) "
                 "Output data type")
        .SetDefault(framework::proto::VarType::FP32);
    AddAttr<std::vector<int64_t>>("shape",
                                  "(vector<int64_t>) The shape of the output")
        .SetDefault({});
    AddInput("ValueTensor",
             "(Tensor, optional) If provided, fill_constant Op will use this "
             "as the value to set the output Tensor; it has a higher priority "
             "than Attr(str_value). The shape of this tensor MUST BE [1].")
        .AsDispensable();
    AddInput("ShapeTensor",
             "(Tensor<int>, optional). The shape of the output. "
             "It has a higher priority than Attr(shape).")
        .AsDispensable();
    AddInput("ShapeTensorList",
             "(vector<Tensor<int>>, optional). The shape of the output. "
             "It has a higher priority than Attr(shape). "
             "The shape of each element in the vector must be [1].")
        .AsDuplicable()
        .AsDispensable();
    AddAttr<float>("value", "(float, default 0.0f) The value to be filled")
        .SetDefault(0.0f);
    AddAttr<std::string>(
        "str_value",
        "(string, default empty) The string that is converted to the value "
        "to be filled")
        .SetDefault("");
    AddAttr<bool>("force_cpu",
                  "(bool, default false) Force the output variable to be "
                  "filled in CPU memory. Otherwise, fill the output variable "
                  "on the running device")
        .SetDefault(false);
    AddAttr<int>("place_type",
                 "(int, default -1) Allow manually setting the place where "
                 "the variable should be held. "
                 "-1: not set manually, determine the place by executor. "
                 "0: CPUPlace. "
                 "1: CUDAPlace. "
                 "2: CUDAPinnedPlace. "
                 "3: XPUPlace. "
                 "4: NPUPlace. ")
        .SetDefault(-1);
    AddOutput("Out",
              "(Tensor) Tensor of the specified shape, filled "
              "with the specified value");
    AddComment(R"DOC(
FillConstant Operator.

Fill a variable of the specified shape with the specified constant value.

)DOC");
  }
};
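// A rough usage sketch from the Python side (assuming the fluid.layers API is
// available); shape, dtype and value map onto the attributes declared above:
//
//   import paddle.fluid as fluid
//   # Creates a 2x3 float32 tensor filled with 1.5.
//   out = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.5)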
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

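// fill_constant has no gradient op, so empty gradient op makers are
// registered for both the static graph (OpDesc) and imperative (OpBase)
// modes.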
REGISTER_OPERATOR(
    fill_constant, ops::FillConstantOp, ops::FillConstantOpMaker,
    ops::FillConstantOpVarTypeInference,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OP_CPU_KERNEL(
    fill_constant, ops::FillConstantKernel<float>,
    ops::FillConstantKernel<double>, ops::FillConstantKernel<uint8_t>,
    ops::FillConstantKernel<int16_t>, ops::FillConstantKernel<int>,
    ops::FillConstantKernel<int64_t>, ops::FillConstantKernel<bool>,
    ops::FillConstantKernel<paddle::platform::float16>,
    ops::FillConstantKernel<paddle::platform::bfloat16>,
    ops::FillConstantKernel<paddle::platform::complex<float>>,
    ops::FillConstantKernel<paddle::platform::complex<double>>);

REGISTER_OP_VERSION(fill_constant)
    .AddCheckpoint(
        R"ROC(
      Upgrade fill_constant, add a new input [ValueTensor].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewInput(
            "ValueTensor",
            "In order to support the new feature of taking the fill value "
            "from a Tensor"))
    .AddCheckpoint(
        R"ROC(
      Upgrade fill_constant to add a new attribute [place_type].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "place_type",
            "In order to support tensors in CUDAPinnedPlace and XPUPlace", -1));