// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gtest/gtest.h>
#include "lite/api/paddle_use_kernels.h"
#include "lite/api/paddle_use_ops.h"
#include "lite/core/arena/framework.h"
#include "lite/tests/utils/fill_data.h"

namespace paddle {
namespace lite {

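// Test case for the reshape / reshape2 ops. The target shape can be fed to
// the op as a list of 1-D "ShapeTensor" inputs, as a single "Shape" tensor,
// or through the "shape" attribute.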
class ReshapeComputeTester : public arena::TestCase {
 protected:
  // common attributes for this op.
  std::string op_type_ = "reshape2";
  std::string input_ = "x";
  std::string output_ = "out";
  std::string xshape_ = "xshape";
  std::vector<std::string> shape_tensor_vct_;
  std::string shape_tensor_;
  DDim dims_;
  std::vector<int> shape_;
  bool inplace_ = false;

 public:
  ReshapeComputeTester(const Place& place,
                       const std::string& alias,
                       DDim dims,
                       std::vector<int> shape,
                       bool is_shape_tensor_vct = false,
                       bool is_shape_tensor = false,
                       bool is_shape = true)
      : TestCase(place, alias), dims_(dims) {
    if (is_shape_tensor_vct) {
      // Pass the new shape through a list of 1-D "ShapeTensor" inputs; the
      // values are kept in shape_ so PrepareData can fill those tensors.
      shape_ = shape;
      for (size_t i = 0; i < shape.size(); i++) {
        shape_tensor_vct_.emplace_back(op_type_ + "/shape" + std::to_string(i));
      }
    } else if (is_shape_tensor) {
      // Pass the new shape through a single "Shape" tensor, filled from shape_.
      shape_ = shape;
      shape_tensor_ = op_type_ + "/shape";
    } else if (is_shape) {
      // Pass the new shape through the "shape" attribute.
      shape_ = shape;
    } else {
      LOG(FATAL) << "must set new shape!";
    }
  }

  void RunBaseline(Scope* scope) override {
    auto* out = scope->NewTensor(output_);
    CHECK(out);

    auto* x = scope->FindTensor(input_);

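    // Gather the requested shape with the same precedence the op uses: the
    // ShapeTensor list first, then the Shape tensor, then the "shape" attr.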
    std::vector<int> out_shape;
    if (shape_tensor_vct_.size() > 0) {
      for (auto shape_tensor : shape_tensor_vct_) {
        out_shape.push_back(scope->FindTensor(shape_tensor)->data<int>()[0]);
      }
    } else if (!shape_tensor_.empty()) {
      auto shape_tensor = scope->FindTensor(shape_tensor_);
      auto shape_tensor_data = shape_tensor->data<int>();
      out_shape = std::vector<int>(shape_tensor_data,
                                   shape_tensor_data + shape_tensor->numel());
    } else if (!shape_.empty()) {
      out_shape = shape_;
    } else {
      LOG(FATAL) << "must set new shape!";
    }

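    // Resolve the final shape: a 0 entry copies the corresponding input
    // dimension, and at most one -1 entry is inferred after this loop.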
    std::vector<int64_t> final_out_shape(out_shape.size(), 1);
    int unk_dim_idx = -1;
    int cap = 1;
    for (size_t i = 0; i < out_shape.size(); i++) {
      if (out_shape[i] == -1) {
        CHECK_EQ(unk_dim_idx, -1);
        unk_dim_idx = i;
      } else if (out_shape[i] == 0) {
        // 0 keeps the corresponding input dimension, so the index must be a
        // valid axis of the input.
        CHECK_LT(i, dims_.size());
        final_out_shape[i] = dims_[i];
      } else if (out_shape[i] > 0) {
        final_out_shape[i] = out_shape[i];
      } else {
        LOG(FATAL) << "invalid shape";
      }
      cap *= final_out_shape[i];
    }

    // If one dimension was left as -1, infer it so that the total element
    // count matches the input tensor.
    if (unk_dim_idx > -1) {
      final_out_shape[unk_dim_idx] = dims_.production() / cap;
    }

    out->Resize(final_out_shape);

    auto x_data = x->data<float>();
    auto out_data = out->mutable_data<float>();
    // Reshape never reorders elements, so the output data is a plain copy.
    memcpy(out_data, x_data, sizeof(float) * dims_.production());

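    // reshape2 additionally outputs XShape: the input dims prefixed with a 0.
    // Only its shape matters, so no data is written into it.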
    if (op_type_ == "reshape2") {
      auto* xshape = scope->NewTensor(xshape_);
      auto xshape_dims = dims_.Vectorize();
      xshape_dims.insert(xshape_dims.begin(), 0);
      xshape->Resize(xshape_dims);
    }
  }

  void PrepareOpDesc(cpp::OpDesc* op_desc) override {
    op_desc->SetType(op_type_);
    op_desc->SetInput("X", {input_});
    if (shape_tensor_vct_.size() > 0) {
      op_desc->SetInput("ShapeTensor", shape_tensor_vct_);
    } else if (!shape_tensor_.empty()) {
      op_desc->SetInput("Shape", {shape_tensor_});
    } else if (shape_.size() > 0) {
      op_desc->SetAttr("shape", shape_);
    } else {
      LOG(FATAL) << "invalid shape";
    }
    op_desc->SetOutput("Out", {output_});
    if (op_type_ == "reshape2") {
      op_desc->SetOutput("XShape", {xshape_});
    }
    op_desc->SetAttr("inplace", inplace_);
  }

  void PrepareData() override {
    std::vector<float> din(dims_.production());
    fill_data_rand(din.data(), -1.f, 1.f, dims_.production());
    SetCommonTensor(input_, dims_, din.data());

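    // When the shape is passed through tensors, create those tensors here so
    // the op under test and the baseline read the same values.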
    if (shape_tensor_vct_.size() > 0) {
      for (size_t i = 0; i < shape_.size(); i++) {
        std::vector<int> shape_data{shape_[i]};
        SetCommonTensor(shape_tensor_vct_[i],
                        DDim(std::vector<int64_t>{1}),
                        shape_data.data());
      }
    }
    if (!shape_tensor_.empty()) {
      SetCommonTensor(
          shape_tensor_,
          DDim(std::vector<int64_t>{static_cast<int64_t>(shape_.size())}),
          shape_.data());
    }
  }
};

TEST(Reshape, precision) {
  LOG(INFO) << "test Reshape op";
  float abs_error = 2e-5;
  Place place;
#if defined(LITE_WITH_NPU)
  place = TARGET(kNPU);
  abs_error = 1e-2;  // Using fp16 in NPU
#elif defined(LITE_WITH_XPU)
  place = TARGET(kXPU);
#else
  return;
#endif

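  // Reshape a 2x3x4x5 input to same-rank and lower-rank shapes, including
  // -1 (inferred) and 0 (copy the input dimension) entries.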
  DDim dims{{2, 3, 4, 5}};
  std::vector<std::vector<int>> shapes{{5, 4, 3, 2},
                                       {2, 3, 20},
                                       {2, 60},
                                       {120},
                                       {2, 3, -1},
                                       {0, 0, 20},
                                       {0, 0, -1}};
  for (auto shape : shapes) {
#ifdef LITE_WITH_NPU
    if (dims.size() > 4 || shape.size() > 4) continue;
#endif
    std::unique_ptr<arena::TestCase> tester(
        new ReshapeComputeTester(place, "def", dims, shape));
    arena::Arena arena(std::move(tester), place, abs_error);
    // "xshape" carries no data, so exclude it from the precision check.
    arena.TestPrecision({"xshape"});
  }
}

}  // namespace lite
}  // namespace paddle