/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/fill_constant_op.h"
#include <string>
#include <vector>
namespace paddle {
namespace operators {

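// fill_constant fills the output tensor "Out" with a single constant value.
// The output shape comes from the "shape" attribute, or from the optional
// ShapeTensor / ShapeTensorList inputs when they are provided.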
class FillConstantOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "FillConstant");

    auto& shape = ctx->Attrs().Get<std::vector<int64_t>>("shape");
    if (!ctx->HasInput("ShapeTensor") && !ctx->HasInputs("ShapeTensorList")) {
      for (size_t i = 0; i < shape.size(); ++i) {
        PADDLE_ENFORCE_GE(
            shape[i], 0,
            platform::errors::InvalidArgument(
                "Each value of attribute 'shape' is expected to be greater "
                "than 0. But recieved: shape[%u] = %d; shape = [%s].",
                i, shape[i], framework::make_ddim(shape)));
      }
    }

    if (shape.empty() && ctx->HasInput("ShapeTensor")) {
      auto shape_dims = ctx->GetInputDim("ShapeTensor");
      int num_ele = 1;
      for (int i = 0; i < shape_dims.size(); ++i) {
        num_ele *= shape_dims[i];
      }
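      // The rank is known from ShapeTensor's length, but the concrete
      // dimension values are only available at run time, so mark every
      // dimension as -1 (unknown) for now.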
      auto vec_dims = std::vector<int>(num_ele, -1);
      ctx->SetOutputDim("Out", framework::make_ddim(vec_dims));

      return;
    }
    ctx->SetOutputDim("Out", framework::make_ddim(shape));
  }

 protected:
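  // The kernel is chosen from the "dtype" attribute and the current place,
  // not from any input tensor.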
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::proto::VarType::Type(ctx.Attr<int>("dtype")),
        ctx.GetPlace());
  }
};

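// Propagates the "dtype" attribute to the output variable "Out" so that
// later passes see the correct data type.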
class FillConstantOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext* ctx) const override {
    auto data_type = static_cast<framework::proto::VarType::Type>(
        boost::get<int>(ctx->GetAttr("dtype")));
    auto& out_var_name = ctx->Output("Out").front();
    ctx->SetDataType(out_var_name, data_type);
  }
};

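// Declares the operator interface: the dtype/shape/value/str_value/force_cpu
// attributes, the optional ValueTensor/ShapeTensor/ShapeTensorList inputs and
// the single output "Out".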
class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddAttr<int>("dtype",
                 "(int, default 5 (FP32)) "
                 "Output data type")
        .SetDefault(framework::proto::VarType::FP32);
    AddAttr<std::vector<int64_t>>("shape",
                                  "(vector<int64_t>) The shape of the output")
        .SetDefault({});
    AddInput("ValueTensor",
             "(Tensor, optional) If provided, fill_constant Op will use this "
             "as value to set the output Tensor, this has a higher priority "
             "than attr(str_value), the shape of this tensor MUST BE [1].")
        .AsDispensable();
    AddInput("ShapeTensor",
             "(Tensor<int>), optional). The shape of the output."
             "It has a higher priority than Attr(shape).")
        .AsDispensable();
    AddInput("ShapeTensorList",
             "(vector<Tensor<int>>, optional). The shape of the output. "
             "It has a higher priority than Attr(shape)."
             "The shape of the element in vector must be [1].")
        .AsDuplicable()
        .AsDispensable();
    AddAttr<float>("value", "(float, default 0.0f) The value to be filled")
        .SetDefault(0.0f);
    AddAttr<std::string>(
        "str_value",
        "(string, default empty) The str convert to value to be filled")
        .SetDefault("");
    AddAttr<bool>("force_cpu",
                  "(bool, default false) Force fill output variable to cpu "
                  "memory. Otherwise, fill output variable to the running "
                  "device")
        .SetDefault(false);
    AddOutput("Out",
              "(Tensor) Tensor of specified shape will be filled "
              "with the specified value");
    AddComment(R"DOC(
FillConstant Operator.

Fill a variable with the specified constant value.

)DOC");
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

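// fill_constant has no gradient, so empty grad-op makers are registered for
// both the static graph (OpDesc) and imperative (OpBase) modes.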
REGISTER_OPERATOR(
    fill_constant, ops::FillConstantOp, ops::FillConstantOpMaker,
    ops::FillConstantOpVarTypeInference,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);

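// CPU kernels are registered for every data type fill_constant supports:
// float, double, int, int64_t, bool and float16.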
REGISTER_OP_CPU_KERNEL(fill_constant, ops::FillConstantKernel<float>,
                       ops::FillConstantKernel<double>,
                       ops::FillConstantKernel<int64_t>,
                       ops::FillConstantKernel<int>,
                       ops::FillConstantKernel<bool>,
                       ops::FillConstantKernel<paddle::platform::float16>);