// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/lite/kernels/arm/elementwise_add_compute.h"
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/lite/core/op_registry.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace arm {

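// The kernel should be registered and retrievable from the global registry.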
TEST(elementwise_add_arm, retrieve_op) {
  auto elementwise_add =
      KernelRegistry::Global().Create<TARGET(kARM), PRECISION(kFloat)>(
          "elementwise_add");
  ASSERT_FALSE(elementwise_add.empty());
  ASSERT_TRUE(elementwise_add.front());
}

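// A freshly constructed kernel reports float precision on the ARM target.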
TEST(elementwise_add_arm, init) {
  ElementwiseAddCompute elementwise_add;
  ASSERT_EQ(elementwise_add.precision(), PRECISION(kFloat));
  ASSERT_EQ(elementwise_add.target(), TARGET(kARM));
}

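// Scalar reference implementation of elementwise add with broadcasting:
// each element of Y is added across a slice of X, with Y aligned to X
// starting at `axis`. Used below to validate the ARM kernel's output.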
template <typename dtype>
void elementwise_add_compute_ref(const operators::ElementwiseParam& param) {
  const dtype* x_data = param.X->data<const dtype>();
  const dtype* y_data = param.Y->data<const dtype>();
  dtype* out_data = param.Out->mutable_data<dtype>();
  auto x_dims = param.X->dims();
  auto y_dims = param.Y->dims();
  int axis = param.axis;
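  // A negative axis means Y is aligned with the trailing dimensions of X.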
  if (axis < 0) {
    axis = x_dims.size() - y_dims.size();
  }
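  // Collapse X into [batch, channels, num]: `batch` spans the dims before
  // `axis`, `channels` spans the dims covered by Y, and `num` spans the
  // trailing dims, so each element of Y is broadcast over `num` outputs.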
  int batch = 1;
  int channels = 1;
  int num = 1;
  for (int i = 0; i < axis; ++i) {
    batch *= x_dims[i];
  }
  for (int i = 0; i < y_dims.size(); ++i) {
    channels *= y_dims[i];
  }
  for (int i = y_dims.size() + axis; i < x_dims.size(); ++i) {
    num *= x_dims[i];
  }
  for (int i = 0; i < batch; ++i) {
    for (int j = 0; j < channels; ++j) {
      int offset = (i * channels + j) * num;
      const dtype* din_ptr = x_data + offset;
      const dtype diny_data = y_data[j];
      dtype* dout_ptr = out_data + offset;
      for (int k = 0; k < num; ++k) {
        *dout_ptr = *din_ptr + diny_data;
        dout_ptr++;
        din_ptr++;
      }
    }
  }
}

TEST(elementwise_add, compute) {
  ElementwiseAddCompute elementwise_add;
  operators::ElementwiseParam param;
  lite::Tensor x, y, output, output_ref;

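  // Sweep 4-D X shapes, a range of candidate Y shapes, and all axis values.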
  for (auto n : {1, 3, 4, 11}) {
    for (auto c : {1, 3, 4, 11}) {
      for (auto h : {1, 3, 4, 11}) {
        for (auto w : {1, 3, 4, 11}) {
          for (auto axis : {-1, 0, 1, 2, 3}) {
            for (auto yd :
                 {std::vector<int64_t>({n}), std::vector<int64_t>({c}),
                  std::vector<int64_t>({h}), std::vector<int64_t>({w}),
                  std::vector<int64_t>({n, c}), std::vector<int64_t>({c, h}),
                  std::vector<int64_t>({h, w}), std::vector<int64_t>({n, c, h}),
                  std::vector<int64_t>({c, h, w}),
                  std::vector<int64_t>({n, c, h, w})}) {
              auto x_dim = DDim(std::vector<int64_t>({n, c, h, w}));
              auto y_dim = DDim(yd);
              int axis_t = axis < 0 ? x_dim.size() - y_dim.size() : axis;

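              // Skip combinations where Y does not fit inside X at this
              // axis: either it extends past X's last dimension or the
              // extents do not match.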
              if (axis_t + y_dim.size() > 4) continue;
              bool flag = false;
              for (int i = 0; i < y_dim.size(); i++) {
                if (x_dim[i + axis_t] != y_dim[i]) flag = true;
              }
              if (flag) continue;

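              // Outputs take X's shape, since Y is broadcast into X.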
              x.Resize(x_dim);
              y.Resize(y_dim);
              output.Resize(x_dim);
              output_ref.Resize(x_dim);
              auto* x_data = x.mutable_data<float>();
              auto* y_data = y.mutable_data<float>();
              auto* output_data = output.mutable_data<float>();
              auto* output_ref_data = output_ref.mutable_data<float>();
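              // Fill both inputs with ramp data so mismatches are easy to spot.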
              for (int i = 0; i < x_dim.production(); i++) {
                x_data[i] = i;
              }
              for (int i = 0; i < y_dim.production(); i++) {
                y_data[i] = i;
              }
              param.X = &x;
              param.Y = &y;
              param.axis = axis;
              param.Out = &output;
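              // Run the ARM kernel, then the reference on the same inputs,
              // and compare element by element.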
              elementwise_add.SetParam(param);
              elementwise_add.Run();
              param.Out = &output_ref;
              elementwise_add_compute_ref<float>(param);
              for (int i = 0; i < output.dims().production(); i++) {
                EXPECT_NEAR(output_data[i], output_ref_data[i], 1e-5);
              }
            }
          }
        }
      }
    }
  }
}

}  // namespace arm
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

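// Pull in the kernel registration consumed by the registry lookup above.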
USE_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, def);