test_eager_prim.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sstream>

#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
#include "paddle/fluid/eager/api/utils/hook_utils.h"
#include "paddle/fluid/eager/backward.h"
#include "paddle/fluid/prim/utils/utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/flags.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "test/cpp/eager/test_utils.h"
#include "test/cpp/prim/init_env_utils.h"

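// Declares FLAGS_tensor_operants_mode; the tests below set it to "eager" so
// tensor operants dispatch through the eager API.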
PHI_DECLARE_string(tensor_operants_mode);

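// Declare the kernels these tests exercise so their registrations are linked
// into the test binary.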
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(tanh, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(tanh_grad, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(pow, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(multiply, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(less_equal, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(less_than, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(equal, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(not_equal, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(greater_equal, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(greater_than, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_and, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_or, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_xor, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_not, CPU, ALL_LAYOUT);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
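// On GPU builds, the element-wise kernels below are registered under the KPS
// (kernel primitives) backend rather than plain GPU.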
PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(tanh, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(tanh_grad, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(pow, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(scale, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(multiply, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(less_equal, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(less_than, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(equal, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(not_equal, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(greater_equal, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(greater_than, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_and, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_or, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_xor, KPS, ALL_LAYOUT);
PD_DECLARE_KERNEL(bitwise_not, KPS, ALL_LAYOUT);

#endif

using eager_test::CreateTensorWithValue;

namespace paddle {
namespace prim {

TEST(EagerPrim, TanhBackwardTest) {
  // 1. Initialize environment
  eager_test::InitEnv(paddle::platform::CPUPlace());
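  // Select the eager tensor operants so operator overloads and composite
  // backward rules dispatch through the eager API.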
  FLAGS_tensor_operants_mode = "eager";
  paddle::prim::InitTensorOperants();
  // 2. Prepare input tensors
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::FLOAT32,
                                                 phi::DataLayout::NCHW,
                                                 5.0 /*value*/,
                                                 true /*is_leaf*/);
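  // Retain grads on the leaf tensors so they can be inspected after backward.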
  ::egr::egr_utils_api::RetainGradForTensor(tensor0);
  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::FLOAT32,
                                                 phi::DataLayout::NCHW,
                                                 5.0 /*value*/,
                                                 true /*is_leaf*/);
  ::egr::egr_utils_api::RetainGradForTensor(tensor1);
  // 3. Run Forward once
  paddle::Tensor out0 = tanh_ad_func(tensor0);
  std::vector<paddle::Tensor> outs0 = {out0};
  // Disable prim
  PrimCommonUtils::SetBwdPrimEnabled(false);
  ASSERT_FALSE(PrimCommonUtils::IsBwdPrimEnabled());
  // 4. Run Backward with prim disabled: tensor0's grad is produced by the
  //    native tanh_grad kernel and serves as the reference.
  egr::Backward(outs0, {}, false);

  paddle::Tensor out1 = tanh_ad_func(tensor1);
  std::vector<paddle::Tensor> outs1 = {out1};
  // Enable prim
  PrimCommonUtils::SetBwdPrimEnabled(true);
  ASSERT_TRUE(PrimCommonUtils::IsBwdPrimEnabled());
  // 5. Run Backward again with prim enabled: tensor1's grad is produced by
  //    the composite (prim) rule.
  ::egr::Backward(outs1, {}, false);
  VLOG(7)
      << "Target Grad is: "
      << std::static_pointer_cast<phi::DenseTensor>(
             ::egr::EagerUtils::unsafe_autograd_meta(tensor0)->Grad().impl())
             ->data<float>()[0];
  VLOG(7)
      << "Result Grad is: "
      << std::static_pointer_cast<phi::DenseTensor>(
             ::egr::EagerUtils::unsafe_autograd_meta(tensor1)->Grad().impl())
             ->data<float>()[0];
  // Examine Backward Grad: the composite gradient of tensor1 must match the
  // kernel gradient recorded for tensor0.
  eager_test::CompareGradTensorWithValue<float>(
      tensor1,
      std::static_pointer_cast<phi::DenseTensor>(
          ::egr::EagerUtils::unsafe_autograd_meta(tensor0)->Grad().impl())
          ->data<float>()[0]);
}

TEST(EagerPrim, LogicalOperantsTest) {
  // 1. Initialize environment
  eager_test::InitEnv(paddle::platform::CPUPlace());
  FLAGS_tensor_operants_mode = "eager";
  paddle::prim::InitTensorOperants();
  // 2. Prepare input tensors
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::INT32,
                                                 phi::DataLayout::NCHW,
                                                 1 /*value*/,
                                                 true /*is_leaf*/);
  ::egr::egr_utils_api::RetainGradForTensor(tensor0);
  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::INT32,
                                                 phi::DataLayout::NCHW,
                                                 0 /*value*/,
                                                 true /*is_leaf*/);
  ::egr::egr_utils_api::RetainGradForTensor(tensor1);
  // 3. Run Forward once
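  // Each overloaded bitwise operator should match its *_ad_func counterpart.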
  paddle::Tensor out0 = tensor0 & tensor1;
  paddle::Tensor out1 = bitwise_and_ad_func(tensor0, tensor1);
  EXPECT_EQ(out0.data<int>()[0], out1.data<int>()[0]);
  out0 = tensor0 | tensor1;
  out1 = bitwise_or_ad_func(tensor0, tensor1);
  EXPECT_EQ(out0.data<int>()[0], out1.data<int>()[0]);
  out0 = tensor0 ^ tensor1;
  out1 = bitwise_xor_ad_func(tensor0, tensor1);
  EXPECT_EQ(out0.data<int>()[0], out1.data<int>()[0]);
  out0 = ~tensor0;
  out1 = bitwise_not_ad_func(tensor0);
  EXPECT_EQ(out0.data<int>()[0], out1.data<int>()[0]);
}

TEST(EagerPrim, CompareOperantsTest) {
  // 1. Initialize environment
  eager_test::InitEnv(paddle::platform::CPUPlace());
  FLAGS_tensor_operants_mode = "eager";
  paddle::prim::InitTensorOperants();
  // 2. Prepare input tensors
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::INT32,
                                                 phi::DataLayout::NCHW,
                                                 1 /*value*/,
                                                 true /*is_leaf*/);
  ::egr::egr_utils_api::RetainGradForTensor(tensor0);
  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::INT32,
                                                 phi::DataLayout::NCHW,
                                                 0 /*value*/,
                                                 true /*is_leaf*/);
  ::egr::egr_utils_api::RetainGradForTensor(tensor1);
  // 3. Run Forward once
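  // Each overloaded comparison operator should match its *_ad_func
  // counterpart.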
  paddle::Tensor out0 = (tensor0 < tensor1);
  paddle::Tensor out1 = less_than_ad_func(tensor0, tensor1);
  EXPECT_EQ(out0.data<bool>()[0], out1.data<bool>()[0]);
  out0 = (tensor0 <= tensor1);
  out1 = less_equal_ad_func(tensor0, tensor1);
  EXPECT_EQ(out0.data<bool>()[0], out1.data<bool>()[0]);
  out0 = (tensor0 == tensor1);
  out1 = equal_ad_func(tensor0, tensor1);
  EXPECT_EQ(out0.data<bool>()[0], out1.data<bool>()[0]);
  out0 = (tensor0 != tensor1);
  out1 = not_equal_ad_func(tensor0, tensor1);
  EXPECT_EQ(out0.data<bool>()[0], out1.data<bool>()[0]);
  out0 = (tensor0 > tensor1);
  out1 = greater_than_ad_func(tensor0, tensor1);
  EXPECT_EQ(out0.data<bool>()[0], out1.data<bool>()[0]);
  out0 = (tensor0 >= tensor1);
  out1 = greater_equal_ad_func(tensor0, tensor1);
  EXPECT_EQ(out0.data<bool>()[0], out1.data<bool>()[0]);
}

TEST(EagerPrim, TestFlags) {
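  // The global backward-prim switch should round-trip through its setter.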
  PrimCommonUtils::SetBwdPrimEnabled(true);
  ASSERT_TRUE(PrimCommonUtils::IsBwdPrimEnabled());
  PrimCommonUtils::SetBwdPrimEnabled(false);
  ASSERT_FALSE(PrimCommonUtils::IsBwdPrimEnabled());
}

}  // namespace prim
}  // namespace paddle