// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/core/kernel.h"
#include "lite/core/op_registry.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace arm {

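// Fills the output tensor with a single constant value. The output shape may
// come from a shape Tensor, a list of scalar shape Tensors, or the `shape`
// attribute (see GetShape); the element type is chosen at runtime from the
// `dtype` attribute, so the kernel is registered with kAny precision.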
class FillConstantCompute : public KernelLite<TARGET(kARM), PRECISION(kAny)> {
 public:
  using param_t = operators::FillConstantParam;

  inline DDimLite GetShape(const param_t& param) {
    // 1. shape is a Tensor
    if (param.shape_tensor != nullptr) {
      auto* shape_tensor = param.shape_tensor;
      auto* shape_data = shape_tensor->data<int>();
      auto vec_shape =
          std::vector<int64_t>(shape_data, shape_data + shape_tensor->numel());
      return DDimLite(vec_shape);
    }

    // 2. shape is a list/tuple of scalar Tensors
    auto shape_tensor_list = param.shape_tensor_list;
    if (shape_tensor_list.size() > 0) {
      std::vector<int64_t> vec_shape;
      for (size_t i = 0; i < shape_tensor_list.size(); ++i) {
        auto tensor = shape_tensor_list[i];
        vec_shape.push_back(*tensor->data<int>());
      }
      return DDimLite(vec_shape);
    }

    // 3. shape is a plain list/tuple of integers (no Tensors)
    auto vec_shape = param.shape;
    return DDimLite(vec_shape);
  }

  // Resolve the output shape once and resize the output before the first run.
  void PrepareForRun() override {
    auto& param = *param_.get_mutable<param_t>();
    auto outdims = GetShape(param);
    param.Out->Resize(outdims);
  }

  void Run() override {
    auto& param = *param_.get_mutable<param_t>();
    auto& context = ctx_->As<ARMContext>();

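    // Dispatch on the runtime `dtype` attribute and fill every element of the
    // output with `value`, cast to the requested element type.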
    if (param.dtype == static_cast<int32_t>(lite::core::FluidType::FP32)) {
      auto data = param.Out->template mutable_data<float>();
      for (int i = 0; i < param.Out->numel(); i++) {
        data[i] = param.value;
      }
    } else if (param.dtype ==
               static_cast<int32_t>(lite::core::FluidType::INT32)) {
      auto data = param.Out->template mutable_data<int32_t>();
      for (int i = 0; i < param.Out->numel(); i++) {
        data[i] = param.value;
      }
    } else if (param.dtype ==
               static_cast<int32_t>(lite::core::FluidType::INT8)) {
      auto data = param.Out->template mutable_data<int8_t>();
      for (int i = 0; i < param.Out->numel(); i++) {
        data[i] = param.value;
      }
    } else {
      LOG(FATAL) << "unsupported dtype " << param.dtype;
    }
  }

  virtual ~FillConstantCompute() = default;
};

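// Kernel for fill_constant_batch_size_like: fills the output with a constant
// value, and when the input carries LoD, resizes the output's batch dimension
// to the number of sequences before filling.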
class FillConstantBatchLikeCompute
    : public KernelLite<TARGET(kARM), PRECISION(kAny)> {
 public:
  using param_t = operators::FillConstantBatchLikeParam;

  void Run() override {
    auto& param = *param_.get_mutable<param_t>();
    auto& context = ctx_->As<ARMContext>();

    // When the input carries LoD and the batch dimension is dim 0, the real
    // batch size is the number of sequences (lod.back().size() - 1), so the
    // output is resized accordingly before filling.
    if (param.input->lod().size() && param.input_dim_idx == 0) {
      auto odims = param.out->dims();
      odims[param.output_dim_idx] = param.input->lod().back().size() - 1;
      param.out->Resize(odims);
    }

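    // Same dtype dispatch as in FillConstantCompute::Run above.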
    if (param.dtype == static_cast<int32_t>(lite::core::FluidType::FP32)) {
      auto data = param.out->template mutable_data<float>();
      for (int i = 0; i < param.out->numel(); i++) {
        data[i] = param.value;
      }
    } else if (param.dtype ==
               static_cast<int32_t>(lite::core::FluidType::INT32)) {
      auto data = param.out->template mutable_data<int32_t>();
      for (int i = 0; i < param.out->numel(); i++) {
        data[i] = param.value;
      }
    } else if (param.dtype ==
               static_cast<int32_t>(lite::core::FluidType::INT8)) {
      auto data = param.out->template mutable_data<int8_t>();
      for (int i = 0; i < param.out->numel(); i++) {
        data[i] = param.value;
      }
    } else {
      LOG(FATAL) << "unsupported dtype " << param.dtype;
    }
  }

  virtual ~FillConstantBatchLikeCompute() = default;
};

}  // namespace arm
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

// fill_constant: the output element type is picked at runtime from the
// `dtype` attribute, so the kernel is registered with kAny precision.
REGISTER_LITE_KERNEL(fill_constant,
                     kARM,
                     kAny,
                     kNCHW,
                     paddle::lite::kernels::arm::FillConstantCompute,
                     def)
    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM))})
    .BindInput("ShapeTensor",
               {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
    .BindInput("ShapeTensorList",
               {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kInt32))})
    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kAny))})
    .Finalize();
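// fill_constant_batch_size_like is registered with kAny precision for the
// same reason; its Input and Out tensors are bound with kAny as well.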
REGISTER_LITE_KERNEL(fill_constant_batch_size_like,
                     kARM,
                     kAny,
                     kNCHW,
                     paddle::lite::kernels::arm::FillConstantBatchLikeCompute,
                     def)
    .BindInput("Input", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kAny))})
    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kAny))})
    .Finalize();