/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <array>
#include <vector>

#include <gtest/gtest.h>

#include "paddle/framework/data_transform.h"
#include "paddle/platform/device_context.h"

namespace paddle {
namespace framework {
using namespace platform;

/**
 * @brief Cross validation of transforms between different kernel types.
 *  We use a four-bit map to represent each combination of kernel attributes.
 *  If a field has more than two possible values, only two of them are tested.
 *  For DataType, only FP32 (float) and FP64 (double) are tested.
 *  e.g. 0000 -> FP32, CPUPlace, kNHWC, kPlain
 *       1111 -> FP64, CUDAPlace, kNCHW, kMKLDNN
 */

std::array<proto::DataType, 2> kDataType = {proto::DataType::FP32,
                                            proto::DataType::FP64};

std::array<Place, 2> kPlace = {CPUPlace(), CUDAPlace(0)};

std::array<DataLayout, 2> kDataLayout = {
    DataLayout::kNHWC, DataLayout::kNCHW,
};

std::array<LibraryType, 2> kLibraryType = {
    LibraryType::kPlain, LibraryType::kMKLDNN,
};

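// Build an OpKernelType from a 4-bit code; the bits select
// {DataType, Place, DataLayout, LibraryType}, in that order.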
OpKernelType GenFromBit(const std::vector<bool> bits) {
  return OpKernelType(kDataType[bits[0]], kPlace[bits[1]], kDataLayout[bits[2]],
                      kLibraryType[bits[3]]);
}

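// Shared counter mutated by the dummy transform functions; the tests check
// its value to verify which transform was dispatched.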
int test_value = 0;

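// Kernel types built from their 4-bit codes; pairs of these serve as keys
// for the transform registrations below.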
auto kernel0 = GenFromBit({0, 0, 0, 0});
auto kernel1 = GenFromBit({0, 0, 0, 1});
auto kernel2 = GenFromBit({0, 0, 1, 0});
auto kernel3 = GenFromBit({0, 0, 1, 1});

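// Dummy transform functions; each changes test_value by a different amount
// so the Register test can tell which one ran.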
void TransDataType_t(const platform::DeviceContext* ctx,
                     const KernelTypePair& p, const Variable& in,
                     Variable* out) {
  test_value++;
}

void TransDataLayout_t(const platform::DeviceContext* ctx,
                       const KernelTypePair& p, const Variable& in,
                       Variable* out) {
  test_value--;
}

void TransLibraryType_t(const platform::DeviceContext* ctx,
                        const KernelTypePair& p, const Variable& in,
                        Variable* out) {
  test_value += 2;
}

}  // namespace framework
}  // namespace paddle

namespace frw = paddle::framework;

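// Register a dummy transform for each (from, to) kernel-type pair.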
REGISTER_DATA_TRANSFORM_FN(frw::kernel0, frw::kernel1, frw::TransDataType_t);
REGISTER_DATA_TRANSFORM_FN(frw::kernel1, frw::kernel2, frw::TransDataLayout_t);
REGISTER_DATA_TRANSFORM_FN(frw::kernel0, frw::kernel2, frw::TransLibraryType_t);

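// Checks that Get() returns the transform registered for each kernel-type pair.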
TEST(DataTransform, Register) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  auto& instance = DataTransformFnMap::Instance();
  paddle::framework::Variable in;
  paddle::framework::Variable out;

  DeviceContext* ctx = new CPUDeviceContext();
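  // kernel0 -> kernel1 dispatches TransDataType_t: test_value 0 -> 1.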
  auto pair0 = std::make_pair(frw::kernel0, frw::kernel1);
  instance.Get(pair0)(ctx, pair0, in, &out);
  ASSERT_EQ(test_value, 1);

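  // kernel1 -> kernel2 dispatches TransDataLayout_t: test_value 1 -> 0.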
  auto pair1 = std::make_pair(frw::kernel1, frw::kernel2);
  instance.Get(pair1)(ctx, pair1, in, &out);
  ASSERT_EQ(test_value, 0);

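  // kernel0 -> kernel2 dispatches TransLibraryType_t: test_value 0 -> 2.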
  auto pair3 = std::make_pair(frw::kernel0, frw::kernel2);
  instance.Get(pair3)(ctx, pair3, in, &out);
  ASSERT_EQ(test_value, 2);
}

TEST(DataTransform, Layout) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  auto& instance = DataTransformFnMap::Instance();
  Variable in;
  Variable out;
  Tensor* src = in.GetMutable<Tensor>();
  src->mutable_data<double>(make_ddim({2, 3, 1, 2}), CPUPlace());
  src->set_layout(DataLayout::kNHWC);

  DeviceContext* ctx = new CPUDeviceContext();

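  // The two kernel types differ only in DataLayout (kNHWC vs. kNCHW),
  // so the layout transform is exercised.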
  {
    auto kernel1 = GenFromBit({1, 0, 0, 0});
    auto kernel2 = GenFromBit({1, 0, 1, 0});
    auto pair0 = std::make_pair(kernel1, kernel2);
    instance.Get(pair0)(ctx, pair0, in, &out);
  }

  Tensor dst = out.Get<Tensor>();
  EXPECT_TRUE(dst.layout() != src->layout());
}

TEST(DataTransform, DataType) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  auto& instance = DataTransformFnMap::Instance();
  DeviceContext* ctx = new CPUDeviceContext();

  Variable in;
  Variable out;
  Tensor* src = in.GetMutable<Tensor>();
  float* ptr = src->mutable_data<float>(make_ddim({2, 3}), CPUPlace());
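  // Integer division: ptr ends up holding {0, 0, 0, 1, 1, 1}.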
  for (int i = 0; i < 6; ++i) {
    ptr[i] = i / 3;
  }

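  // The two kernel types differ only in DataType (FP32 -> FP64),
  // so the data type transform is exercised.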
  {
    auto kernel1 = GenFromBit({0, 0, 0, 0});
    auto kernel2 = GenFromBit({1, 0, 0, 0});
    auto pair0 = std::make_pair(kernel1, kernel2);
    instance.Get(pair0)(ctx, pair0, in, &out);
  }
  Tensor dst = out.Get<Tensor>();
  EXPECT_TRUE(dst.data<double>() != nullptr);
}