// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//
// Created by Jiabin on 2019-08-19.
//

#include <paddle/fluid/framework/op_registry.h>

#include <memory>
#include <string>
#include <vector>

#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/imperative/prepared_operator.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/phi/core/kernel_registry.h"

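// Make the phi kernels used by these tests visible to the registry so that
// their registrations are linked into the test binary.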
PD_DECLARE_KERNEL(split, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(relu, CPU, ALL_LAYOUT);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(relu, GPU, ALL_LAYOUT);
#endif

namespace imperative = paddle::imperative;
namespace platform = paddle::platform;
namespace framework = paddle::framework;

namespace paddle {
namespace imperative {

extern void TestHandleComplexGradToRealGradEager(
    const NameVarMap<egr::EagerVariable>& outs);

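// Builds a framework::VariableNameMap (op argument name -> variable names)
// from an imperative NameVarBaseMap, following the inputs or outputs declared
// in the op proto. Dispensable arguments that are absent map to empty lists.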
static framework::VariableNameMap CreateVarNameMap(
    const framework::OpInfo& op_info, const std::string& op_type,
    const NameVarBaseMap& varbase_map, bool is_input) {
  if (op_info.proto_ == nullptr) {
    return {};
  }

  framework::VariableNameMap result;

  for (auto& var :
       is_input ? op_info.Proto().inputs() : op_info.Proto().outputs()) {
    auto it = varbase_map.find(var.name());
    if (it == varbase_map.end()) {
      PADDLE_ENFORCE_EQ(
          var.dispensable(), true,
          platform::errors::NotFound("Variable %s is not dispensable and "
                                     "there is no such variable in the inputs",
                                     var.name()));
      result[var.name()] = {};
    } else {
      auto& var_vector = it->second;
      std::vector<std::string> args;
      args.reserve(var_vector.size());
      for (auto& var_base : var_vector) {
        args.emplace_back(var_base->Name());
      }
      result[var.name()] = std::move(args);
    }
  }
  return result;
}

using vb_vector = std::vector<std::shared_ptr<imperative::VarBase>>;

using var_pair = std::pair<std::string, vb_vector>;

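// PreparedOp::Prepare should select a CPU kernel for "split" without any
// fatal failure.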
TEST(test_prepare_op, test_prepare_op) {
  std::shared_ptr<imperative::VarBase> vin(
      new imperative::VarBase(false, "vin"));
  std::shared_ptr<imperative::VarBase> vout(
      new imperative::VarBase(false, "vout"));
  framework::OpDesc desc;
  platform::CPUPlace place;
  vin->MutableVar()->GetMutable<framework::LoDTensor>()->mutable_data<float>(
      place);
  var_pair x_pair = var_pair("X", vb_vector(1, vin));
  var_pair out_pair = var_pair("Out", vb_vector(1, vout));
  imperative::NameVarBaseMap ins = {x_pair};
  imperative::NameVarBaseMap outs = {out_pair};
  framework::AttributeMap split_attr_map;
  const auto& info = framework::OpInfoMap::Instance().Get("split");
  if (info.Checker()) info.Checker()->Check(&split_attr_map);
  framework::VariableNameMap var_in_map =
      CreateVarNameMap(info, "split", ins, true);
  framework::VariableNameMap var_out_map =
      CreateVarNameMap(info, "split", outs, false);
  auto op = framework::OpRegistry::CreateOp("split", var_in_map, var_out_map,
                                            split_attr_map);
  ASSERT_NO_FATAL_FAILURE(PreparedOp preparedOp = PreparedOp::Prepare(
                              ins, outs,
                              dynamic_cast<framework::OperatorWithKernel&>(*op),
                              place, split_attr_map, {}));
}

const framework::Tensor* GetTensorFromVar(const framework::Variable& var);

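// GetTensorFromVar should return a non-null tensor even when the variable
// holds a SelectedRows rather than a LoDTensor.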
TEST(test_prepare_op, test_get_tensor_from_var) {
  std::shared_ptr<imperative::VarBase> vout_error(
      new imperative::VarBase(false, "vout_error"));
  vout_error->MutableVar()->GetMutable<phi::SelectedRows>();
  auto* ts = GetTensorFromVar(*vout_error->MutableVar());
  ASSERT_TRUE(ts != nullptr);
}

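// With CUDA/HIP support, PrepareData should transfer a CPU input to the
// requested GPU place.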
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
TEST(test_prepare_op, test_prepare_data) {
  std::shared_ptr<imperative::VarBase> vin(
      new imperative::VarBase(false, "vin"));
  std::shared_ptr<imperative::VarBase> vout(
      new imperative::VarBase(false, "vout"));

  framework::OpDesc desc;
  platform::CPUPlace cpu_place;
  platform::CUDAPlace gpu_place(0);
  std::vector<float> src_data(10, 2.0);
  std::vector<int64_t> dims = {2, 5};

  // prepare a CPU-only input
  auto* vin_tensor = vin->MutableVar()->GetMutable<framework::LoDTensor>();
  vin_tensor->Resize(phi::make_ddim(dims));
  auto* vin_mutable_tensor = vin_tensor->mutable_data<float>(cpu_place);
  paddle::memory::Copy(cpu_place, vin_mutable_tensor, cpu_place,
                       src_data.data(), sizeof(float) * src_data.size());

  var_pair x_pair = var_pair("X", vb_vector(1, vin));
  var_pair out_pair = var_pair("Out", vb_vector(1, vout));
  imperative::NameVarBaseMap ins = {x_pair};
  imperative::NameVarBaseMap outs = {out_pair};
  const std::string op_type = "relu";
  framework::AttributeMap attr_map;
  const auto& info = framework::OpInfoMap::Instance().Get(op_type);
  if (info.Checker()) info.Checker()->Check(&attr_map);
  framework::VariableNameMap var_in_map =
      CreateVarNameMap(info, op_type, ins, true);
  framework::VariableNameMap var_out_map =
      CreateVarNameMap(info, op_type, outs, false);
  auto op = framework::OpRegistry::CreateOp(op_type, var_in_map, var_out_map,
                                            attr_map);

  // test whether the input can be transferred to the GPU place
  auto prepared_op = PreparedOp::Prepare(
      ins, outs, dynamic_cast<framework::OperatorWithKernel&>(*op), gpu_place,
      attr_map, {});
  PrepareData<imperative::VarBase>(
      dynamic_cast<framework::OperatorWithKernel&>(*op), ins,
      prepared_op.kernel_type());
  for (const auto& name_pair : ins) {
    for (const auto& vb : name_pair.second) {
      ASSERT_TRUE(platform::is_same_place(
          vb->Var().Get<framework::LoDTensor>().place(), gpu_place));
    }
  }
}
#endif

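// Shared helper: prepares a CPU-resident "relu" input and checks that
// PrepareData leaves it on the CPU place, i.e. no device transfer occurs.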
void TestPrepareDataSamePlace(framework::AttributeMap attr_map) {
  std::shared_ptr<imperative::VarBase> vin(
      new imperative::VarBase(false, "vin"));
  std::shared_ptr<imperative::VarBase> vout(
      new imperative::VarBase(false, "vout"));

  framework::OpDesc desc;
  platform::CPUPlace cpu_place;
  std::vector<float> src_data(10, 2.0);
  std::vector<int64_t> dims = {2, 5};

  // prepare a CPU-only input
  auto* vin_tensor = vin->MutableVar()->GetMutable<framework::LoDTensor>();
  vin_tensor->Resize(phi::make_ddim(dims));
  auto* vin_mutable_tensor = vin_tensor->mutable_data<float>(cpu_place);
  paddle::memory::Copy(cpu_place, vin_mutable_tensor, cpu_place,
                       src_data.data(), sizeof(float) * src_data.size());

  var_pair x_pair = var_pair("X", vb_vector(1, vin));
  var_pair out_pair = var_pair("Out", vb_vector(1, vout));
  imperative::NameVarBaseMap ins = {x_pair};
  imperative::NameVarBaseMap outs = {out_pair};
  const std::string op_type = "relu";
  const auto& info = framework::OpInfoMap::Instance().Get(op_type);
  if (info.Checker()) info.Checker()->Check(&attr_map);
  framework::VariableNameMap var_in_map =
      CreateVarNameMap(info, op_type, ins, true);
  framework::VariableNameMap var_out_map =
      CreateVarNameMap(info, op_type, outs, false);

  auto op = framework::OpRegistry::CreateOp(op_type, var_in_map, var_out_map,
                                            attr_map);

  // test that the data is never transferred to a GPU place
  auto prepared_op = PreparedOp::Prepare(
      ins, outs, dynamic_cast<framework::OperatorWithKernel&>(*op), cpu_place,
      attr_map, {});
  PrepareData<imperative::VarBase>(
      dynamic_cast<framework::OperatorWithKernel&>(*op), ins,
      prepared_op.kernel_type());
  for (const auto& name_pair : ins) {
    for (const auto& vb : name_pair.second) {
      ASSERT_TRUE(platform::is_same_place(
          vb->Var().Get<framework::LoDTensor>().place(), cpu_place));
    }
  }
}

TEST(test_prepare_op, test_prepare_data_same_place) {
  TestPrepareDataSamePlace({});
}

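// Eager-mode complex-to-real gradient handling is exercised via the
// externally defined TestHandleComplexGradToRealGradEager helper.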
TEST(test_prepare_op, test_complex_eager) {
  NameVarMap<egr::EagerVariable> outs = {};
  TestHandleComplexGradToRealGradEager(outs);
}

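// Even with use_mkldnn enabled, prepared data should stay on the CPU place.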
#ifdef PADDLE_WITH_MKLDNN
TEST(test_prepare_op, test_prepare_data_cpu_mkldnn) {
  TestPrepareDataSamePlace({{"use_mkldnn", true}});
}
#endif
}  // namespace imperative
}  // namespace paddle

USE_OP_ITSELF(split);
USE_OP_ITSELF(relu);
#ifdef PADDLE_WITH_MKLDNN
USE_OP_DEVICE_KERNEL(relu, MKLDNN);
#endif