// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/lite/kernels/arm/mul_compute.h"
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#include <memory>
#include <random>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/arm/math/funcs.h"
#include "paddle/fluid/lite/core/op_registry.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace arm {

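// Fill the first n elements of a with uniform random values in [lower, upper);
// each call reseeds the generator with an incrementing seed.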
template <typename T>
void FillData(T* a, const int n, const T lower = static_cast<T>(-2.f),
              const T upper = static_cast<T>(2.f)) {
  static unsigned int seed = 100;
  std::mt19937 rng(seed++);
  std::uniform_real_distribution<double> uniform_dist(0, 1);
  for (int i = 0; i < n; ++i) {
    a[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
  }
}

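// The "mul" kernel should be retrievable from the registry for the ARM
// target at float precision.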
TEST(mul_arm, retrieve_op) {
  auto mul =
      KernelRegistry::Global().Create<TARGET(kARM), PRECISION(kFloat)>("mul");
  ASSERT_FALSE(mul.empty());
  ASSERT_TRUE(mul.front());
}

TEST(mul_arm, init) {
  MulCompute mul;
  ASSERT_EQ(mul.precision(), PRECISION(kFloat));
  ASSERT_EQ(mul.target(), TARGET(kARM));
}

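// Run the ARM MulCompute kernel on small (m, k) x (k, n) shapes and compare
// the result against the Eigen reference implementation.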
TEST(mul_arm, compare_test) {
  using T = float;

  for (int m : {1, 2, 3, 4}) {
    for (int n : {1, 2, 3, 4}) {
      for (int k : {1, 2, 3, 4}) {
        VLOG(3) << "m: " << m << ", n: " << n << ", k: " << k;
        lite::Tensor x, y, out, ref;
        x.Resize({m, k});
        y.Resize({k, n});
        out.Resize({m, n});
        ref.Resize({m, n});

        auto* x_data = x.mutable_data<T>();
        auto* y_data = y.mutable_data<T>();
        auto* out_data = out.mutable_data<T>();
        auto* ref_data = ref.mutable_data<T>();

        FillData<T>(x_data, x.dims().production());
        FillData<T>(y_data, y.dims().production());
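        // Zero both the kernel output and the reference buffer before running.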
        FillData<T>(out_data, out.dims().production(), 0, 0);
        FillData<T>(ref_data, ref.dims().production(), 0, 0);

        MulCompute mul;
        operators::MulParam param;

        param.x = &x;
        param.y = &y;
        param.output = &out;

        DeviceInfo::Init();
        std::unique_ptr<KernelContext> ctx(new KernelContext);
        ctx->As<ARMContext>();
        mul.SetParam(param);
        mul.SetContext(std::move(ctx));
        mul.PrepareForRun();

        mul.Run();

        lite::arm::math::mul_compute_eigen(x_data, m, k, y_data, k, n,
                                           ref_data);
        for (int i = 0; i < out.dims().production(); i++) {
          EXPECT_NEAR(out_data[i], ref_data[i], 1e-3);
        }
      }
    }
  }
}

TEST(mul_arm, num_col_dims) {
  using T = float;

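  // x (2, 3, 4) and y (3, 4, 5) are flattened to (2, 12) and (12, 5)
  // matrices, so the expected output shape is (2, 5).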
  lite::Tensor x, y, out, ref;
  x.Resize({2, 3, 4});
  y.Resize({3, 4, 5});
  out.Resize({2, 5});
  ref.Resize({2, 5});

  auto* x_data = x.mutable_data<T>();
  auto* y_data = y.mutable_data<T>();
  auto* out_data = out.mutable_data<T>();
  auto* ref_data = ref.mutable_data<T>();

  FillData<T>(x_data, x.dims().production());
  FillData<T>(y_data, y.dims().production());
  FillData<T>(out_data, out.dims().production());
  FillData<T>(ref_data, ref.dims().production());

  MulCompute mul;
  operators::MulParam param;

  param.x = &x;
  param.y = &y;
  param.output = &out;
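  // Keep the first dim of x as rows and the first two dims of y as rows.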
  param.x_num_col_dims = 1;
  param.y_num_col_dims = 2;

  DeviceInfo::Init();
  std::unique_ptr<KernelContext> ctx(new KernelContext);
  ctx->As<ARMContext>();
  mul.SetParam(param);
  mul.SetContext(std::move(ctx));
  mul.PrepareForRun();

  mul.Run();

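  // Reference result on the flattened (2 x 12) * (12 x 5) matrices.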
  lite::arm::math::mul_compute_eigen(x_data, 2, 12, y_data, 12, 5, ref_data);
  for (int i = 0; i < out.dims().production(); i++) {
    EXPECT_NEAR(out_data[i], ref_data[i], 1e-3);
  }
}

}  // namespace arm
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

USE_LITE_KERNEL(mul, kARM, kFloat, kNCHW, def);