// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/operators/fill_constant_op.h"
#include "lite/core/op_registry.h"

namespace paddle {
namespace lite {
namespace operators {

// Structural validation: fill_constant has no inputs to check, so the only
// requirement is that the output tensor pointer has been bound.
bool FillConstantOp::CheckShape() const {
  CHECK(param_.out != nullptr);
  return true;
}

bool FillConstantOp::InferShapeImpl() const {
28 29 30 31 32 33 34
  std::vector<int64_t> out_shape;
  auto shape_tensor = param_.shape_tensor;
  auto shape_tensor_list = param_.shape_tensor_list;
  if (shape_tensor != nullptr) {
    auto shape_tensor_data = shape_tensor->data<int>();
    for (int i = 0; i < shape_tensor->numel(); i++) {
      out_shape.push_back(shape_tensor_data[i]);
35
    }
36
  } else if (!shape_tensor_list.empty()) {
37
    for (size_t i = 0; i < shape_tensor_list.size(); i++) {
38
      out_shape.push_back(shape_tensor_list[i]->data<int>()[0]);
39
    }
40 41 42 43 44
  } else if (!param_.shape.empty()) {
    out_shape = param_.shape;
  } else {
    LOG(FATAL) << "no valid out_shape. Must set one of shape_tensor, or "
                  "shape_tensor_list, or shape.";
Y
Yan Chunwei 已提交
45 46
  }

47 48 49
  param_.out->Resize(out_shape);
  return true;
}
// Binds the op description's attributes and optional inputs onto param_.
// `shape`, `ShapeTensor`, and `ShapeTensorList` are all optional here;
// InferShapeImpl() later selects whichever one was actually provided.
bool FillConstantOp::AttachImpl(const cpp::OpDesc& opdesc, lite::Scope* scope) {
  auto out_name = opdesc.Output("Out").front();

  param_.out = GetMutableVar<lite::Tensor>(scope, out_name);
  param_.dtype = opdesc.GetAttr<int>("dtype");
  if (opdesc.HasAttr("shape")) {
    param_.shape = opdesc.GetAttr<std::vector<int64_t>>("shape");
  }
  param_.value = opdesc.GetAttr<float>("value");
  param_.force_cpu = opdesc.GetAttr<bool>("force_cpu");

  if (opdesc.HasInput("ShapeTensor") && !opdesc.Input("ShapeTensor").empty()) {
    auto shape_tensor_name = opdesc.Input("ShapeTensor").front();
    param_.shape_tensor = GetMutableVar<lite::Tensor>(scope, shape_tensor_name);
  }
  if (opdesc.HasInput("ShapeTensorList") &&
      !opdesc.Input("ShapeTensorList").empty()) {
    // Take each name by const reference to avoid copying a std::string
    // per iteration (performance-for-range-copy).
    for (const auto& shape_tensor_name : opdesc.Input("ShapeTensorList")) {
      param_.shape_tensor_list.push_back(
          GetMutableVar<lite::Tensor>(scope, shape_tensor_name));
    }
  }
  return true;
}
}  // namespace operators
}  // namespace lite
}  // namespace paddle

REGISTER_LITE_OP(fill_constant, paddle::lite::operators::FillConstantOp);