// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/kernels/arm/cast_compute.h"
#include <algorithm>
#include <cstdint>  // int8_t, int32_t, int64_t, uint8_t
#include <cstring>  // memcpy
#include "lite/backends/arm/math/funcs.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace arm {

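// TransOp casts a single element; std::transform applies it across a tensor.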
template <class in_type, class out_type>
out_type TransOp(in_type in) {
  return static_cast<out_type>(in);
}

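// A cast has no weights or cached buffers, so there is nothing to prepare.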
void CastCompute::PrepareForRun() {}

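// Run dispatches on the (in_dtype, out_dtype) pair: the same-width copy case
// uses memcpy, every other supported pair converts element-wise via
// std::transform.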
void CastCompute::Run() {
  auto& ctx = this->ctx_->template As<ARMContext>();
  auto& param = this->Param<operators::CastParam>();

  auto input_dims = param.X->dims();

  // Dtype codes follow paddle::framework::proto::VarType:
  // BOOL = 0; INT16 = 1; INT32 = 2; INT64 = 3; FP16 = 4; FP32 = 5; FP64 = 6;
  // SIZE_T = 19; UINT8 = 20; INT8 = 21;
  if (param.in_dtype == param.out_dtype && param.in_dtype == 2) {
    // int32 -> int32: same element width, so a raw copy suffices.
    const auto* x_data = param.X->data<int32_t>();
    auto* o_data = param.Out->mutable_data<int32_t>();
    memcpy(o_data, x_data, sizeof(int32_t) * param.X->numel());
  } else if (param.in_dtype == 21 && param.out_dtype == 5) {  // int8->float32
    // Use int8_t explicitly: plain char is unsigned on ARM, which would
    // corrupt negative int8 values during the cast.
    const int8_t* x_data_begin = param.X->data<int8_t>();
    const int8_t* x_data_end = x_data_begin + param.X->numel();
    float* out_data = param.Out->mutable_data<float>();
    std::transform(x_data_begin, x_data_end, out_data, TransOp<int8_t, float>);
  } else if (param.in_dtype == 2 && param.out_dtype == 5) {  // int32 -> float32
    const int32_t* x_data_begin = param.X->data<int32_t>();
    const int32_t* x_data_end = x_data_begin + param.X->numel();
    float* out_data = param.Out->mutable_data<float>();
    std::transform(x_data_begin, x_data_end, out_data, TransOp<int32_t, float>);
  } else if (param.in_dtype == 20 && param.out_dtype == 5) {  // uint8->float32
    const uint8_t* x_data_begin = param.X->data<uint8_t>();
    const uint8_t* x_data_end = x_data_begin + param.X->numel();
    float* out_data = param.Out->mutable_data<float>();
    std::transform(
        x_data_begin, x_data_end, out_data, TransOp<uint8_t, float>);
  } else if (param.in_dtype == 3 && param.out_dtype == 2) {  // int64->int32
    const int64_t* x_data_begin = param.X->data<int64_t>();
    const int64_t* x_data_end = x_data_begin + param.X->numel();
    int32_t* out_data = param.Out->mutable_data<int32_t>();
    std::transform(
        x_data_begin, x_data_end, out_data, TransOp<int64_t, int32_t>);
  } else {
    LOG(FATAL) << "cast from in_dtype(" << param.in_dtype << ") to out_dtype("
               << param.out_dtype << ") is not implemented";
  }
}

}  // namespace arm
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

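// X and Out are bound with PRECISION(kAny): the buffers are interpreted
// according to in_dtype/out_dtype at run time, not at registration time.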
REGISTER_LITE_KERNEL(
    cast, kARM, kFloat, kNCHW, paddle::lite::kernels::arm::CastCompute, def)
    .BindInput("X", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kAny))})
    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM), PRECISION(kAny))})
    .Finalize();