// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gtest/gtest.h>
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/inference/lite/tensor_utils.h"

namespace paddle {
namespace inference {
namespace lite {
namespace utils {

using paddle::lite_api::TargetType;
using paddle::lite_api::PrecisionType;
using paddle::lite_api::DataLayoutType;

TEST(LiteEngineOp, GetNativePlace) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
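  // This and the following tests re-declare the conversion helper locally:
  // it is defined in tensor_utils.cc and (presumably) not exposed through
  // the public tensor_utils.h header.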
  platform::Place GetNativePlace(const TargetType& type, int id = 0);
  EXPECT_TRUE(platform::is_cpu_place(GetNativePlace(TargetType::kHost)));
  EXPECT_TRUE(platform::is_gpu_place(GetNativePlace(TargetType::kCUDA)));
  EXPECT_ANY_THROW(GetNativePlace(TargetType::kUnk));
}

TEST(LiteEngineOp, GetLiteTargetType) {
  TargetType GetLiteTargetType(const platform::Place& place);
  ASSERT_EQ(GetLiteTargetType(platform::CPUPlace()), TargetType::kHost);
  ASSERT_EQ(GetLiteTargetType(platform::CUDAPlace(0)), TargetType::kCUDA);
}

TEST(LiteEngineOp, GetLitePrecisionType) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  PrecisionType GetLitePrecisionType(framework::proto::VarType::Type type);
  ASSERT_EQ(GetLitePrecisionType(framework::proto::VarType_Type_FP32),
            PrecisionType::kFloat);
  ASSERT_EQ(GetLitePrecisionType(framework::proto::VarType_Type_INT8),
            PrecisionType::kInt8);
  ASSERT_EQ(GetLitePrecisionType(framework::proto::VarType_Type_INT32),
            PrecisionType::kInt32);
  EXPECT_ANY_THROW(
      GetLitePrecisionType(framework::proto::VarType_Type_SELECTED_ROWS));
}

TEST(LiteEngineOp, GetNativePrecisionType) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  framework::proto::VarType::Type GetNativePrecisionType(
      const PrecisionType& type);
  ASSERT_EQ(GetNativePrecisionType(PrecisionType::kFloat),
            framework::proto::VarType_Type_FP32);
  ASSERT_EQ(GetNativePrecisionType(PrecisionType::kInt8),
            framework::proto::VarType_Type_INT8);
  ASSERT_EQ(GetNativePrecisionType(PrecisionType::kInt32),
            framework::proto::VarType_Type_INT32);
  EXPECT_ANY_THROW(GetNativePrecisionType(PrecisionType::kUnk));
}

TEST(LiteEngineOp, GetNativeLayoutType) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  framework::DataLayout GetNativeLayoutType(const DataLayoutType& type);
  ASSERT_EQ(GetNativeLayoutType(DataLayoutType::kNCHW),
            framework::DataLayout::kNCHW);
  EXPECT_ANY_THROW(GetNativeLayoutType(DataLayoutType::kNHWC));
}

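// Fill a host lite::Tensor with the values 0..count-1 and check that the
// pointer returned by GetLiteTensorDataPtr yields the same values for the
// given precision.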
template <typename T>
void test_lite_tensor_data_ptr(PrecisionType precision_type) {
  void* GetLiteTensorDataPtr(paddle::lite_api::Tensor * src,
                             PrecisionType precision_type,
                             TargetType target_type);
  const int count = 4;
  paddle::lite::Tensor lite_tensor;
  lite_tensor.Resize({count});
  auto* lite_tensor_data = lite_tensor.mutable_data<T>();
  for (int i = 0; i < count; ++i) {
    lite_tensor_data[i] = i;
  }
  paddle::lite_api::Tensor lite_api_tensor(&lite_tensor);
  T* data = static_cast<T*>(GetLiteTensorDataPtr(
      &lite_api_tensor, precision_type, TargetType::kHost));
  for (int i = 0; i < count; ++i) {
    CHECK_EQ(data[i], static_cast<T>(i)) << "element " << i << " is incorrect.";
  }
}

TEST(LiteEngineOp, GetLiteTensorDataPtr) {
  test_lite_tensor_data_ptr<int64_t>(PrecisionType::kInt64);
  test_lite_tensor_data_ptr<int32_t>(PrecisionType::kInt32);
  test_lite_tensor_data_ptr<int8_t>(PrecisionType::kInt8);
  EXPECT_ANY_THROW(test_lite_tensor_data_ptr<double>(PrecisionType::kUnk));
}

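// Copy a LoDTensor into a lite::Tensor and back with TensorCopyAsync, then
// verify that both the data and the LoD survive the round trip.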
void test_tensor_copy(const platform::DeviceContext& ctx) {
  // Create LoDTensor.
  std::vector<float> vector({1, 2, 3, 4});
  framework::LoDTensor lod_tensor;
  framework::TensorFromVector(vector, ctx, &lod_tensor);
  framework::LoD lod({{0, 2, 4}});
  lod_tensor.Resize({4, 1});
  lod_tensor.set_lod(lod);
  // Create lite::Tensor and copy.
  paddle::lite::Tensor lite_tensor;
  paddle::lite_api::Tensor lite_api_tensor(&lite_tensor);
  TensorCopyAsync(&lite_api_tensor, lod_tensor, ctx);
  // Copy to LoDTensor.
  framework::LoDTensor lod_tensor_n;
  TensorCopyAsync(&lod_tensor_n, lite_api_tensor, ctx);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
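  // TensorCopyAsync issues an asynchronous copy on GPU, so synchronize the
  // stream before reading the result back on the host.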
  if (platform::is_gpu_place(ctx.GetPlace())) {
    platform::GpuStreamSync(
        static_cast<const platform::CUDADeviceContext&>(ctx).stream());
  }
#endif
  std::vector<float> result;
  paddle::framework::TensorToVector(lod_tensor_n, ctx, &result);
  ASSERT_EQ(result, vector);
  ASSERT_EQ(lod_tensor_n.lod(), lod_tensor.lod());
}

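// Share the LoDTensor's buffer with a lite::Tensor via TensorDataShare, copy
// it back into a new LoDTensor, and verify the data and LoD are preserved.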
void test_tensor_share(const platform::DeviceContext& ctx) {
  std::vector<float> vector({1, 2, 3, 4});
  framework::LoDTensor lod_tensor;
  framework::TensorFromVector(vector, ctx, &lod_tensor);
  framework::LoD lod({{0, 2, 4}});
  lod_tensor.Resize({4, 1});
  lod_tensor.set_lod(lod);
  // Create lite::Tensor and share.
  paddle::lite::Tensor lite_tensor;
  paddle::lite_api::Tensor lite_api_tensor(&lite_tensor);
  TensorDataShare(&lite_api_tensor, &lod_tensor);
  // Copy to LoDTensor.
  framework::LoDTensor lod_tensor_n;
  TensorCopyAsync(&lod_tensor_n, lite_api_tensor, ctx);
  std::vector<float> result;
  paddle::framework::TensorToVector(lod_tensor_n, ctx, &result);
  ASSERT_EQ(result, vector);
  ASSERT_EQ(lod_tensor_n.lod(), lod_tensor.lod());
}

TEST(LiteEngineOp, TensorCopyAsync) {
  auto* ctx_cpu =
      platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
  test_tensor_copy(*ctx_cpu);
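  // Repeat the round trip on GPU when built with CUDA or HIP support.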
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  auto* ctx_gpu =
      platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
  test_tensor_copy(*ctx_gpu);
#endif
}

TEST(LiteEngineOp, TensorShare) {
  auto* ctx_cpu =
      platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
  test_tensor_share(*ctx_cpu);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  auto* ctx_gpu =
      platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
  test_tensor_share(*ctx_gpu);
#endif
}

}  // namespace utils
}  // namespace lite
}  // namespace inference
}  // namespace paddle