// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <set>
#include <string>
#include <vector>

#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/imperative/basic_engine.h"
#include "paddle/fluid/imperative/execution_context.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/imperative/var_helper.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/core/compat/type_defs.h"

namespace paddle {
namespace imperative {
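// Defined in the imperative runtime; declared extern here so the tests can
// call them directly.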
extern std::string LayerDebugString(const std::string& op_type,
                                    const NameVarMap<egr::EagerVariable>& ins,
                                    const NameVarMap<egr::EagerVariable>& outs);

extern std::shared_ptr<GradOpNode> CreateGradOpNode(
    const framework::OperatorBase& op, const NameTensorMap& ins,
    const NameTensorMap& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs, const platform::Place& place,
    const std::map<std::string, std::string>& inplace_map);

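// Smoke test: LayerDebugString should format a "mul" op whose inputs and
// outputs are EagerVariables without crashing.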
TEST(test_eager, eager_debug) {
  std::shared_ptr<egr::EagerVariable> x_in(new egr::EagerVariable("x_in"));
  std::shared_ptr<egr::EagerVariable> y_in(new egr::EagerVariable("y_in"));
  std::shared_ptr<egr::EagerVariable> vout(new egr::EagerVariable("vout"));
  imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {x_in}},
                                                    {"Y", {y_in}}};
  imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {vout}}};
  LayerDebugString("mul", ins, outs);
}
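// Creates a grad node for the "mul" op using placeholder (nullptr) variables,
// empty attribute maps, and CPUPlace.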
TEST(test_create_node, eager_node) {
  auto op = framework::OpRegistry::CreateOp("mul", {}, {}, {}, false);
  framework::Scope scope;
  auto ctx = framework::RuntimeContext({}, {});
  imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {nullptr}},
                                                    {"Y", {nullptr}}};
  imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {nullptr}}};
  CreateGradOpNode(*op, ins, outs, framework::AttributeMap{},
                   framework::AttributeMap{}, platform::CPUPlace(), {});
}
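// Exercises InitializeVariable for each supported variable type and the
// var_helper accessors (GetPlace, GetDataType, Get/SetCachedValue, SetType)
// on EagerVariable.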
TEST(test_var_helper, eager_var_helper) {
  framework::Variable var0, var1, var2, var3, var4, var5, var6, var7, var8;
  InitializeVariable(&var0, paddle::framework::proto::VarType::FEED_MINIBATCH);
  InitializeVariable(&var1, paddle::framework::proto::VarType::STEP_SCOPES);
  InitializeVariable(&var2, paddle::framework::proto::VarType::LOD_RANK_TABLE);
  InitializeVariable(&var3,
                     paddle::framework::proto::VarType::LOD_TENSOR_ARRAY);
  InitializeVariable(&var4, paddle::framework::proto::VarType::STRINGS);
  InitializeVariable(&var5, paddle::framework::proto::VarType::VOCAB);
  InitializeVariable(&var6, paddle::framework::proto::VarType::READER);
  InitializeVariable(&var7, paddle::framework::proto::VarType::RAW);
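  // FP64 names a data type rather than a variable type, so initialization
  // must throw.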
  ASSERT_ANY_THROW(
      InitializeVariable(&var8, paddle::framework::proto::VarType::FP64));

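  // Build two EagerVariables: egr_tensor holds a SelectedRows with an FP32
  // value on CPU, while egr_tensor2 holds a LoDRankTable.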
  auto egr_tensor = std::make_shared<egr::EagerVariable>();
  auto egr_tensor2 = std::make_shared<egr::EagerVariable>();
  egr_tensor->MutableVar()
      ->GetMutable<phi::SelectedRows>()
      ->mutable_value()
      ->mutable_data<float>(platform::CPUPlace());
  egr_tensor2->MutableVar()->GetMutable<framework::LoDRankTable>();
  VLOG(6) << "egr_tensor create with ";
  ASSERT_TRUE(platform::is_cpu_place(GetPlace<egr::EagerVariable>(egr_tensor)));
  ASSERT_TRUE(GetDataType<egr::EagerVariable>(egr_tensor) ==
              framework::proto::VarType::FP32);
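  // Exercise the kernel-keyed cache accessors with an FP32 / CPUPlace key.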
  GetCachedValue<egr::EagerVariable>(
      egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
                                          platform::CPUPlace()));
  SetCachedValue<egr::EagerVariable>(
      egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
                                          platform::CPUPlace()),
      egr_tensor2);
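  // Neither of these operations is supported, so both calls are expected to
  // throw.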
  ASSERT_ANY_THROW(GetPlace<egr::EagerVariable>(egr_tensor2));
  ASSERT_ANY_THROW(SetType<egr::EagerVariable>(
      egr_tensor, paddle::framework::proto::VarType::LOD_TENSOR_ARRAY));
}
}  // namespace imperative
}  // namespace paddle

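// Make sure the "mul" operator is linked into this test binary.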
USE_OP(mul);