Unverified commit 68d81d0e, authored by H huangjiyi, committed by GitHub

Fix some compile errors with C++17 (#54282)

Parent d338b2f8
@@ -21,7 +21,7 @@
 #include <vector>
 #include "paddle/fluid/distributed/collective/types.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"  // NOTE: this header is required somewhere
+#include "paddle/fluid/eager/utils.h"
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/device_context.h"
 #include "paddle/phi/core/enforce.h"
...
@@ -20,7 +20,6 @@
 #include "paddle/fluid/distributed/collective/process_group.h"
 #include "paddle/fluid/eager/accumulation/accumulation_node.h"
 #include "paddle/fluid/eager/api/utils/hook_utils.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/fluid/operators/math/concat_and_split.h"
...
set(eager_deps
    phi
    hook_utils
-   tensor_utils
    utils
    global_utils
    backward
...
@@ -5,4 +5,4 @@ add_subdirectory(generated)
cc_library(
  eager_api
  SRCS all.cc
- DEPS tensor_utils hook_utils global_utils eager_scale)
+ DEPS hook_utils global_utils eager_scale)
@@ -17,4 +17,3 @@
 #include "paddle/fluid/eager/api/generated/eager_generated/forwards/scale.h"
 #include "paddle/fluid/eager/api/utils/global_utils.h"
 #include "paddle/fluid/eager/api/utils/hook_utils.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
@@ -4,21 +4,13 @@ cc_library(
  DEPS place tracer)
if(NOT (NOT WITH_PYTHON AND ON_INFER))
-  cc_library(
-    tensor_utils
-    SRCS tensor_utils.cc
-    DEPS phi autograd_meta grad_node_info accumulation_node)
  cc_library(
    hook_utils
    SRCS hook_utils.cc
-   DEPS phi tensor_utils autograd_meta grad_node_info utils accumulation_node)
+   DEPS phi autograd_meta grad_node_info utils accumulation_node)
else()
-  cc_library(
-    tensor_utils
-    SRCS tensor_utils.cc
-    DEPS phi autograd_meta grad_node_info)
  cc_library(
    hook_utils
    SRCS hook_utils.cc
-   DEPS phi tensor_utils autograd_meta grad_node_info utils)
+   DEPS phi autograd_meta grad_node_info utils)
endif()
@@ -15,7 +15,6 @@
 #include "paddle/fluid/eager/api/utils/hook_utils.h"
 #include "paddle/fluid/eager/accumulation/accumulation_node.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/phi/core/dense_tensor.h"
@@ -38,7 +37,7 @@ int64_t RegisterGradientHookForTensor(
void RegisterReduceHookForTensor(const paddle::Tensor& tensor,
                                 const std::function<void()>& hook) {
-  if (IsLeafTensor(tensor)) {
+  if (EagerUtils::IsLeafTensor(tensor)) {
    VLOG(6) << "Register ReduceHook for leaf tensor";
    std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(tensor);
    PADDLE_ENFORCE(
@@ -57,7 +56,7 @@ void RegisterReduceHookForTensor(const paddle::Tensor& tensor,
}
void RetainGradForTensor(const paddle::Tensor& tensor) {
-  if (IsLeafTensor(tensor)) {
+  if (EagerUtils::IsLeafTensor(tensor)) {
    // Leaf tensor's grad will always be retained
    // Refer to implementation of AccumulationNode for more details
    return;
...
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/eager/api/utils/tensor_utils.h"
#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/api/all.h"
namespace egr {
namespace egr_utils_api {
bool IsLeafTensor(const paddle::Tensor& target) {
std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(target);
if (!grad_node ||
std::dynamic_pointer_cast<GradNodeAccumulation>(grad_node)) {
return true;
}
return false;
}
paddle::Tensor CreateTensorWithValue(const phi::DDim& ddim,
const paddle::platform::Place& place,
const phi::DataType& dtype,
const phi::DataLayout& layout,
float value,
bool is_leaf) {
paddle::Tensor out = paddle::experimental::full(
phi::vectorize(ddim), paddle::experimental::Scalar(value), dtype, place);
auto meta = EagerUtils::autograd_meta(&out);
if (is_leaf) {
auto accumulation_node = std::make_shared<GradNodeAccumulation>(meta);
meta->SetGradNode(accumulation_node);
meta->SetStopGradient(false);
}
return out;
}
} // namespace egr_utils_api
} // namespace egr
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/phi/api/include/tensor.h"

namespace egr {
namespace egr_utils_api {

// If and only if the tensor holds an AccumulationNode
// Then it's treated as a leaf tensor
bool IsLeafTensor(const paddle::Tensor& target);

paddle::Tensor CreateTensorWithValue(const phi::DDim& ddim,
                                     const paddle::platform::Place& place,
                                     const phi::DataType& dtype,
                                     const phi::DataLayout& layout,
                                     float value,
                                     bool is_leaf = true);

}  // namespace egr_utils_api
}  // namespace egr
@@ -133,7 +133,7 @@ class AutogradMeta : public AbstractAutogradMeta {
 private:
  // TODO(jiabin) :Should we use pointer instead of object?
-  std::shared_ptr<paddle::Tensor> grad_{std::make_shared<paddle::Tensor>()};
+  std::shared_ptr<paddle::Tensor> grad_ = std::make_shared<paddle::Tensor>();
  // GradNodeBase is base class of all grad op which is a
  // wrapper for grad op. This class will make grad op easy
...
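Aside: a minimal, self-contained sketch (not taken from this patch) of the two default-member-initializer spellings the hunk above switches between; Holder, brace_form, and equals_form are made-up names used only for illustration. The patch moves AutogradMeta::grad_ to the `=` form, presumably to sidestep a C++17 build problem with the brace form on some toolchain.

#include <memory>

struct Holder {
  // Brace-initialized default member initializer (the spelling the patch replaces):
  std::shared_ptr<int> brace_form{std::make_shared<int>(0)};
  // Copy-initialized default member initializer (the spelling the patch adopts):
  std::shared_ptr<int> equals_form = std::make_shared<int>(0);
};

int main() {
  Holder h;
  return *h.brace_form + *h.equals_form;  // both members hold 0, so this returns 0
}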
@@ -178,6 +178,29 @@ void EagerUtils::SetOutRankWithSlot(AutogradMeta* target, size_t slot_id) {
  target->SetSingleOutRankWithSlot(slot_id, 0);
}
bool EagerUtils::IsLeafTensor(const paddle::Tensor& target) {
  std::shared_ptr<GradNodeBase> grad_node_ptr = grad_node(target);
  if (!grad_node_ptr ||
      std::dynamic_pointer_cast<GradNodeAccumulation>(grad_node_ptr)) {
    return true;
  }
  return false;
}

void EagerUtils::CheckInplace(const paddle::Tensor& target,
                              const AutogradMeta* autograd_meta,
                              bool require_any_grad) {
  if (require_any_grad && autograd_meta) {
    PADDLE_ENFORCE_EQ(!autograd_meta->StopGradient() && IsLeafTensor(target),
                      false,
                      paddle::platform::errors::InvalidArgument(
                          "Leaf Var (%s) that doesn't stop gradient "
                          "can't use inplace strategy.",
                          target.name()));
  }
}
std::shared_ptr<egr::EagerVariable> EagerUtils::TrySyncToVar(
    const paddle::Tensor& tensor) {
  return std::make_shared<egr::EagerVariable>(tensor);
...
@@ -14,7 +14,6 @@
 #pragma once
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/eager_tensor.h"
 #include "paddle/fluid/eager/grad_node_info.h"
@@ -149,19 +148,13 @@ class EagerUtils {
    iter.apply(std::forward<Args>(args)...);
  }
+  // If and only if the tensor holds an AccumulationNode
+  // Then it's treated as a leaf tensor
+  static bool IsLeafTensor(const paddle::Tensor& target);
  static void CheckInplace(const paddle::Tensor& target,
                           const AutogradMeta* autograd_meta,
-                          bool require_any_grad) {
-    if (require_any_grad && autograd_meta) {
-      PADDLE_ENFORCE_EQ(!autograd_meta->StopGradient() &&
-                            egr::egr_utils_api::IsLeafTensor(target),
-                        false,
-                        paddle::platform::errors::InvalidArgument(
-                            "Leaf Var (%s) that doesn't stop gradient "
-                            "can't use inplace strategy.",
-                            target.name()));
-    }
-  }
+                          bool require_any_grad);
  // View Strategy
  static void HandleViewBetweenInputAndOutput(
...
@@ -15,24 +15,6 @@ if(WITH_MKLDNN)
      ${mkldnn_quantizer_cfg}
      PARENT_SCOPE)
endif()
-if(WIN32)
-  cc_library(
-    analysis_config
-    SRCS analysis_config.cc
-    DEPS ${mkldnn_quantizer_cfg} lod_tensor paddle_pass_builder table_printer
-         utf8proc)
-else()
-  cc_library(
-    analysis_config
-    SRCS analysis_config.cc
-    DEPS analysis_helper
-         processgroup_comm_utils
-         ${mkldnn_quantizer_cfg}
-         lod_tensor
-         paddle_pass_builder
-         table_printer
-         utf8proc)
-endif()
cc_library(
  paddle_infer_contrib
@@ -42,8 +24,8 @@ cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc)
set(paddle_inference_api_deps
    reset_tensor_array
-   analysis_config
    paddle_infer_contrib
+   paddle_pass_builder
    zero_copy_tensor
    trainer_desc_proto
    custom_operator
@@ -56,6 +38,7 @@ endif()
if(WITH_CUSTOM_DEVICE)
  set(paddle_inference_api_deps ${paddle_inference_api_deps} phi)
endif()
if(WIN32)
  cc_library(
    paddle_inference_api
@@ -68,12 +51,18 @@ else()
    DEPS executor paddle_inference_io ${paddle_inference_api_deps})
endif()
+cc_library(
+  analysis_config
+  SRCS analysis_config.cc
+  DEPS ${mkldnn_quantizer_cfg} paddle_inference_api lod_tensor
+       paddle_pass_builder table_printer utf8proc)
if(WIN32)
  target_link_libraries(paddle_inference_api phi)
endif()
set(inference_deps ${analysis_deps} paddle_inference_api analysis
-   naive_executor ${GLOB_PASS_LIB})
+   analysis_config naive_executor ${GLOB_PASS_LIB})
if(WITH_GPU AND TENSORRT_FOUND)
  set(inference_deps ${inference_deps} tensorrt_engine tensorrt_converter)
...
@@ -356,7 +356,7 @@ class FusedMultiTransformerINT8OpKernel : public framework::OpKernel<T> {
          num_head,
          dim_head,
          time_step->data<int>()[0],
-         1. / sqrt(dim_head));
+         1. / std::sqrt(dim_head));
    } else if (cache_kv_out) {  // generation context stage
      // TODO(wangxi): can remove dropout in inference
      fmha_compute.ComputeForward(qkv_out,
...
@@ -375,7 +375,7 @@ class FusedMultiTransformerOpKernel : public framework::OpKernel<T> {
          dim_head,
          time_step->data<int>()[0],
          rotary_emb_dims,
-         1. / sqrt(dim_head));
+         1. / std::sqrt(dim_head));
    } else if (cache_kv_out) {  // generation context stage
      const phi::DenseTensor *pre_cache_kv_tensor =
          pre_caches.size() > 0 ? pre_caches[i] : nullptr;
@@ -1049,7 +1049,7 @@ class FusedMultiTransformerOpKernel : public framework::OpKernel<T> {
          dim_head,
          time_step->data<int>()[0],
          rotary_emb_dims,
-         1. / sqrt(dim_head));
+         1. / std::sqrt(dim_head));
    } else if (cache_kv_out) {  // generation context stage
      const phi::DenseTensor *pre_cache_kv_tensor =
          pre_caches.size() > 0 ? pre_caches[i] : nullptr;
...
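Aside: a small, self-contained sketch (not part of the patch) of the std::sqrt qualification applied in the hunks above; inv_scale is an illustrative name only. With an integral argument, std::sqrt from <cmath> resolves through the C++ overload set (the int is converted to double); the patch presumably qualifies the call so that lookup no longer depends on which ::sqrt declaration happens to be visible under C++17.

#include <cmath>
#include <cstdio>

// Mirrors the 1. / std::sqrt(dim_head) scaling expression from the diff.
double inv_scale(int dim_head) { return 1. / std::sqrt(dim_head); }

int main() {
  std::printf("%f\n", inv_scale(64));  // prints 0.125000
  return 0;
}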
@@ -1254,7 +1254,7 @@ static PyObject* eager_api_set_master_grads(PyObject* self,
  auto tensor_list = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  for (auto& tensor : tensor_list) {
    VLOG(6) << "set master_grad for tensor: " << tensor.name();
-   if (!egr::egr_utils_api::IsLeafTensor(tensor)) {
+   if (!egr::EagerUtils::IsLeafTensor(tensor)) {
      continue;
    }
    paddle::Tensor* grad = egr::EagerUtils::mutable_grad(tensor);
...
@@ -575,7 +575,7 @@ static PyObject* tensor_clear_gradient(TensorObject* self,
  }
  paddle::Tensor* grad;
-  bool is_leaf = egr::egr_utils_api::IsLeafTensor(self->tensor);
+  bool is_leaf = egr::EagerUtils::IsLeafTensor(self->tensor);
  if (is_leaf) {
    grad = egr::EagerUtils::mutable_grad(self->tensor);
    PADDLE_ENFORCE(grad != nullptr,
@@ -631,7 +631,7 @@ static PyObject* tensor__zero_grads(TensorObject* self,
  EAGER_TRY
  VLOG(4) << "ZeroGrads " << self->tensor.name();
-  if (egr::egr_utils_api::IsLeafTensor(self->tensor)) {
+  if (egr::EagerUtils::IsLeafTensor(self->tensor)) {
    eager_gil_scoped_release guard;
    // Add RetainGrad as PostHook to AccumulationNode
    paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
@@ -1169,7 +1169,7 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
  if (egr::Controller::Instance().HasGrad()) {
    PADDLE_ENFORCE_EQ(
-       egr::egr_utils_api::IsLeafTensor(self->tensor) &&
+       egr::EagerUtils::IsLeafTensor(self->tensor) &&
            !egr::EagerUtils::autograd_meta(&self->tensor)->StopGradient(),
        false,
        platform::errors::InvalidArgument(
@@ -1352,7 +1352,7 @@ static PyObject* tensor_register_grad_hook(TensorObject* self,
                                           PyObject* kwargs) {
  EAGER_TRY
  int64_t hook_id;
-  if (egr::egr_utils_api::IsLeafTensor(self->tensor)) {
+  if (egr::EagerUtils::IsLeafTensor(self->tensor)) {
    VLOG(6) << "Register hook for leaf tensor: " << self->tensor.name();
    auto autograd_meta = egr::EagerUtils::unsafe_autograd_meta(self->tensor);
@@ -1419,7 +1419,7 @@ static PyObject* tensor_register_reduce_hook(TensorObject* self,
  std::shared_ptr<egr::GradNodeBase> grad_node =
      egr::EagerUtils::grad_node(self->tensor);
-  PADDLE_ENFORCE_EQ(egr::egr_utils_api::IsLeafTensor(self->tensor),
+  PADDLE_ENFORCE_EQ(egr::EagerUtils::IsLeafTensor(self->tensor),
                    true,
                    platform::errors::InvalidArgument(
                        "Only can register backward hook for leaf Tensor."));
@@ -1923,7 +1923,7 @@ static PyObject* tensor__unset_fake_empty(TensorObject* self,
                        "Detected NULL grad. Please check if you have manually "
                        "cleared the grad inside autograd_meta"));
-  bool is_leaf = egr::egr_utils_api::IsLeafTensor(self->tensor);
+  bool is_leaf = egr::EagerUtils::IsLeafTensor(self->tensor);
  if (is_leaf) {
    std::static_pointer_cast<egr::GradNodeAccumulation>(
        egr::EagerUtils::grad_node(self->tensor))
...
@@ -20,7 +20,6 @@ limitations under the License. */
 #include "paddle/fluid/eager/accumulation/accumulation_node.h"
 #include "paddle/fluid/eager/api/all.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/utils.h"
 #include "paddle/fluid/memory/allocation/allocator.h"
@@ -75,7 +74,7 @@ PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
PyObject* tensor_properties_is_leaf(TensorObject* self, void* closure) {
  EAGER_TRY
-  return ToPyObject(egr::egr_utils_api::IsLeafTensor(self->tensor));
+  return ToPyObject(egr::EagerUtils::IsLeafTensor(self->tensor));
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
@@ -115,7 +114,7 @@ int tensor_properties_set_grad(TensorObject* self,
  EAGER_TRY
  auto src = CastPyArg2Tensor(value, 0);
  PADDLE_ENFORCE(
-      egr::egr_utils_api::IsLeafTensor(self->tensor),
+      egr::EagerUtils::IsLeafTensor(self->tensor),
      paddle::platform::errors::Fatal("Only leaf Tensor can be set grad."));
  paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
...
@@ -398,7 +398,7 @@ PyObject* pylayer_method_apply(PyObject* cls,
        auto inplace_tensor_autograd_meta =
            egr::EagerUtils::autograd_meta(inplace_tensor);
        PADDLE_ENFORCE_EQ(!inplace_tensor_autograd_meta->StopGradient() &&
-                             egr::egr_utils_api::IsLeafTensor(*inplace_tensor),
+                             egr::EagerUtils::IsLeafTensor(*inplace_tensor),
                          false,
                          paddle::platform::errors::InvalidArgument(
                              "Leaf Var (%s) that doesn't stop gradient "
...
set(eager_deps
    phi
    hook_utils
-   tensor_utils
    utils
    global_utils
    backward
...
@@ -44,6 +44,8 @@ PD_DECLARE_KERNEL(sum_grad, CPU, ALL_LAYOUT);
using namespace egr;            // NOLINT
using namespace egr_utils_api;  // NOLINT
+using eager_test::CreateTensorWithValue;
TEST(Benchmark, EagerScaleCPU) {
  // Prepare Device Contexts
  eager_test::InitEnv(paddle::platform::CPUPlace());
...
@@ -45,6 +45,8 @@ PD_DECLARE_KERNEL(add_grad, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sum, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sum_grad, GPU, ALL_LAYOUT);
+using eager_test::CreateTensorWithValue;
TEST(Benchmark, EagerScaleCUDA) {
  eager_test::InitEnv(paddle::platform::CUDAPlace());
...
@@ -21,7 +21,6 @@
 #include "paddle/fluid/eager/accumulation/accumulation_node.h"
 #include "paddle/fluid/eager/api/all.h"
 #include "paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/grad_node_info.h"
 #include "paddle/phi/core/dense_tensor.h"
@@ -32,6 +31,8 @@
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
+using eager_test::CreateTensorWithValue;
namespace egr {
TEST(Backward, SingleNodeEmptyGrad) {
@@ -43,12 +44,12 @@ TEST(Backward, SingleNodeEmptyGrad) {
  // Create Target Tensor
  paddle::Tensor target_tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            1.0 /*value*/,
                            false /*is_leaf*/);
  paddle::Tensor leaf_tensor;
  {
@@ -94,24 +95,23 @@ TEST(Backward, SingleNodeCustomGrad) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  // Create Target Tensor
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                1.0 /*value*/,
                                                false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));
  std::vector<paddle::Tensor> grad_tensors;
  // Create Grad Tensor
  paddle::Tensor grad_tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            10.0 /*value*/,
                            false /*is_leaf*/);
  grad_tensors.emplace_back(std::move(grad_tensor));
  paddle::Tensor leaf_tensor;
@@ -166,13 +166,12 @@ TEST(Backward, LinearNodes) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  // Create Target Tensor
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                1.0 /*value*/,
                                                false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));
  paddle::Tensor leaf_tensor;
@@ -242,39 +241,37 @@ TEST(Backward, WithAccumulation) {
  // Create Target Tensor
  std::vector<paddle::Tensor> target_tensors;
-  paddle::Tensor tensor0 =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::FLOAT32,
                                                 phi::DataLayout::NCHW,
                                                 1.0 /*value*/,
                                                 false /*is_leaf*/);
-  paddle::Tensor tensor1 =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::FLOAT32,
                                                 phi::DataLayout::NCHW,
                                                 1.0 /*value*/,
                                                 false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor0));
  target_tensors.emplace_back(std::move(tensor1));
  // Create Grad Tensor
  std::vector<paddle::Tensor> grad_tensors;
  paddle::Tensor grad_tensor0 =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            5.0 /*value*/,
                            false /*is_leaf*/);
  paddle::Tensor grad_tensor1 =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            10.0 /*value*/,
                            false /*is_leaf*/);
  grad_tensors.emplace_back(std::move(grad_tensor0));
  grad_tensors.emplace_back(std::move(grad_tensor1));
...
@@ -19,7 +19,6 @@
 #include "paddle/fluid/eager/accumulation/accumulation_node.h"
 #include "paddle/fluid/eager/api/all.h"
 #include "paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/backward.h"
 #include "paddle/fluid/eager/grad_node_info.h"
@@ -39,12 +38,12 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      eager_test::CreateTensorWithValue(ddim,
                                        paddle::platform::CPUPlace(),
                                        phi::DataType::FLOAT32,
                                        phi::DataLayout::NCHW,
                                        1.0 /*value*/,
                                        false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));
  paddle::Tensor& target_tensor = target_tensors[0];
...
@@ -18,7 +18,6 @@
 #include "gtest/gtest.h"
 #include "paddle/fluid/eager/api/all.h"
 #include "paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/grad_node_info.h"
 #include "paddle/phi/core/dense_tensor.h"
@@ -28,6 +27,8 @@
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
+using eager_test::CreateTensorWithValue;
namespace egr {
TEST(Forward, SingleNode) {
@@ -39,8 +40,7 @@ TEST(Forward, SingleNode) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  // Create Target Tensor
-  paddle::Tensor t =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor t = CreateTensorWithValue(ddim,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -89,8 +89,7 @@ TEST(Forward, LinearNodes) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  // Create Target Tensor
-  paddle::Tensor t =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor t = CreateTensorWithValue(ddim,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -175,8 +174,7 @@ TEST(Forward, BranchedNodes) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  // Create Target Tensor
-  paddle::Tensor t =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor t = CreateTensorWithValue(ddim,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
...
@@ -35,6 +35,8 @@ PD_DECLARE_KERNEL(full, GPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(add, KPS, ALL_LAYOUT);
#endif
+using eager_test::CreateTensorWithValue;
namespace egr {
paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -65,13 +67,12 @@ TEST(FwdBwdJoint, SingleNode) {
  // 1. Prepare Input
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                5.0 /*value*/,
                                                true /*is_leaf*/);
  egr_utils_api::RetainGradForTensor(tensor);
  // 3. Run Forward
@@ -109,13 +110,12 @@ TEST(FwdBwdJoint, LinearNodes) {
  // 1. Prepare Input
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                5.0 /*value*/,
                                                true /*is_leaf*/);
  egr_utils_api::RetainGradForTensor(tensor);
  // 3. Run Forward
@@ -163,13 +163,12 @@ TEST(FwdBwdJoint, BranchedNodes) {
  // 1. Prepare Input
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                5.0 /*value*/,
                                                true /*is_leaf*/);
  egr_utils_api::RetainGradForTensor(tensor);
  // 3. Run Forward
@@ -236,13 +235,12 @@ TEST(FwdBwdJoint, GradientHook) {
  // 1. Prepare Input
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                5.0 /*value*/,
                                                true /*is_leaf*/);
  egr_utils_api::RetainGradForTensor(tensor);
  // 3. Run Forward
@@ -309,13 +307,12 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) {
  // 1. Prepare Input
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                5.0 /*value*/,
                                                true /*is_leaf*/);
  egr_utils_api::RetainGradForTensor(tensor);
  // 3. Run Forward
@@ -364,13 +361,12 @@ TEST(FwdBwdJoint, SingleNodeCUDA) {
  // 1. Prepare Input
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CUDAPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                5.0 /*value*/,
                                                true /*is_leaf*/);
  egr_utils_api::RetainGradForTensor(tensor);
  // 3. Run Forward
@@ -405,13 +401,12 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) {
  // 1. Prepare Input
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CUDAPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                5.0 /*value*/,
                                                true /*is_leaf*/);
  egr_utils_api::RetainGradForTensor(tensor);
  // 3. Run Forward
...
@@ -19,7 +19,6 @@
 #include "gtest/gtest.h"
 #include "paddle/fluid/eager/api/all.h"
 #include "paddle/fluid/eager/api/generated/fluid_generated/dygraph_forward_api.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/backward.h"
 #include "paddle/fluid/eager/utils.h"
@@ -35,6 +34,8 @@ PD_DECLARE_KERNEL(add_grad, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sigmoid, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sigmoid_grad, CPU, ALL_LAYOUT);
+using eager_test::CreateTensorWithValue;
namespace egr {
TEST(Generated, Sigmoid) {
@@ -44,13 +45,12 @@ TEST(Generated, Sigmoid) {
  // 1. Prepare Input
  paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
  VLOG(6) << "Make Dim";
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                0.0,
                                                true);
  VLOG(6) << "Make paddle::Tensor";
  egr_utils_api::RetainGradForTensor(tensor);
  VLOG(6) << "Retain Grad for Tensor";
@@ -75,8 +75,7 @@ TEST(Generated, Matmul_v2) {
  // 1. Prepare Input
  paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X =
-      egr_utils_api::CreateTensorWithValue(ddimX,
+  paddle::Tensor X = CreateTensorWithValue(ddimX,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -85,8 +84,7 @@ TEST(Generated, Matmul_v2) {
  egr_utils_api::RetainGradForTensor(X);
  paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
-  paddle::Tensor Y =
-      egr_utils_api::CreateTensorWithValue(ddimY,
+  paddle::Tensor Y = CreateTensorWithValue(ddimY,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -115,8 +113,7 @@ TEST(Generated, ElementwiseAdd) {
  // 1. Prepare Input
  paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X =
-      egr_utils_api::CreateTensorWithValue(ddimX,
+  paddle::Tensor X = CreateTensorWithValue(ddimX,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -125,8 +122,7 @@ TEST(Generated, ElementwiseAdd) {
  egr_utils_api::RetainGradForTensor(X);
  paddle::framework::DDim ddimY = phi::make_ddim({4, 16});
-  paddle::Tensor Y =
-      egr_utils_api::CreateTensorWithValue(ddimY,
+  paddle::Tensor Y = CreateTensorWithValue(ddimY,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
...
@@ -19,7 +19,6 @@
 #include "paddle/fluid/eager/accumulation/accumulation_node.h"
 #include "paddle/fluid/eager/api/all.h"
 #include "paddle/fluid/eager/api/generated/eager_generated/backwards/scale_node.h"
-#include "paddle/fluid/eager/api/utils/tensor_utils.h"
 #include "paddle/fluid/eager/autograd_meta.h"
 #include "paddle/fluid/eager/backward.h"
 #include "paddle/fluid/eager/grad_node_info.h"
@@ -31,6 +30,8 @@
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
+using eager_test::CreateTensorWithValue;
namespace egr {
TEST(Grad, SingleNodeEmptyGrad) {
@@ -42,21 +43,21 @@ TEST(Grad, SingleNodeEmptyGrad) {
  // Create Target Tensor (output)
  paddle::Tensor output_tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            1.0 /*value*/,
                            false /*is_leaf*/);
  // Create input tensor
  const paddle::Tensor leaf_tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            1.0 /*value*/,
                            true /*is_leaf*/);
  {
    // Create Scale Node
@@ -108,33 +109,32 @@ TEST(Grad, SingleNodeCustomGrad) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  // Create Target Tensor
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                1.0 /*value*/,
                                                false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));
  std::vector<paddle::Tensor> grad_tensors;
  // Create Grad Tensor
  paddle::Tensor grad_tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            10.0 /*value*/,
                            false /*is_leaf*/);
  grad_tensors.emplace_back(std::move(grad_tensor));
  paddle::Tensor leaf_tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            1.0 /*value*/,
                            true /*is_leaf*/);
  {
    // Create Scale Node
@@ -187,22 +187,21 @@ TEST(Grad, LinearNodes) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  // Create Target Tensor
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                1.0 /*value*/,
                                                false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));
  paddle::Tensor leaf_tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            1.0 /*value*/,
                            true /*is_leaf*/);
  {
    // Create Node0
    auto node0_ptr = std::make_shared<GradNodeScale>(1, 1);
@@ -269,39 +268,37 @@ TEST(Grad, WithAccumulation) {
  // Create Target Tensor
  std::vector<paddle::Tensor> target_tensors;
-  paddle::Tensor tensor0 =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::FLOAT32,
                                                 phi::DataLayout::NCHW,
                                                 1.0 /*value*/,
                                                 false /*is_leaf*/);
-  paddle::Tensor tensor1 =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
                                                 paddle::platform::CPUPlace(),
                                                 phi::DataType::FLOAT32,
                                                 phi::DataLayout::NCHW,
                                                 1.0 /*value*/,
                                                 false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor0));
  target_tensors.emplace_back(std::move(tensor1));
  // Create Grad Tensor
  std::vector<paddle::Tensor> grad_tensors;
  paddle::Tensor grad_tensor0 =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            5.0 /*value*/,
                            false /*is_leaf*/);
  paddle::Tensor grad_tensor1 =
-      egr_utils_api::CreateTensorWithValue(ddim,
+      CreateTensorWithValue(ddim,
                            paddle::platform::CPUPlace(),
                            phi::DataType::FLOAT32,
                            phi::DataLayout::NCHW,
                            10.0 /*value*/,
                            false /*is_leaf*/);
  grad_tensors.emplace_back(std::move(grad_tensor0));
  grad_tensors.emplace_back(std::move(grad_tensor1));
...
@@ -30,6 +30,8 @@
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
+using eager_test::CreateTensorWithValue;
namespace egr {
paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -63,13 +65,12 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  // Create Target Tensor
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                1.0 /*value*/,
                                                false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));
  paddle::Tensor& target_tensor = target_tensors[0];
@@ -139,13 +140,12 @@ TEST(RetainGrad, HookAfterRetainGrad) {
  paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
  // Create Target Tensor
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                1.0 /*value*/,
                                                false /*is_leaf*/);
  target_tensors.emplace_back(std::move(tensor));
  paddle::Tensor& target_tensor = target_tensors[0];
...
@@ -33,6 +33,8 @@ PD_DECLARE_KERNEL(add_grad, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sigmoid, CPU, ALL_LAYOUT);
PD_DECLARE_KERNEL(sigmoid_grad, CPU, ALL_LAYOUT);
+using eager_test::CreateTensorWithValue;
namespace egr {
paddle::Tensor hook_function(const paddle::Tensor& t) {
@@ -67,13 +69,12 @@ void test_sigmoid(bool is_remove_gradient_hook) {
  paddle::framework::DDim ddim = phi::make_ddim({2, 4, 4, 4});
  VLOG(6) << "Make paddle::Tensor";
-  paddle::Tensor tensor =
-      egr_utils_api::CreateTensorWithValue(ddim,
+  paddle::Tensor tensor = CreateTensorWithValue(ddim,
                                                paddle::platform::CPUPlace(),
                                                phi::DataType::FLOAT32,
                                                phi::DataLayout::NCHW,
                                                0.0,
                                                true);
  VLOG(6) << "Make ReduceHook function";
  auto reduce_hook = [&](void) -> void {
@@ -132,8 +133,7 @@ void test_elementwiseAdd(bool is_remove_gradient_hook) {
  // 1. Prepare Input
  paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X =
-      egr_utils_api::CreateTensorWithValue(ddimX,
+  paddle::Tensor X = CreateTensorWithValue(ddimX,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -142,8 +142,7 @@ void test_elementwiseAdd(bool is_remove_gradient_hook) {
  egr_utils_api::RetainGradForTensor(X);
  paddle::framework::DDim ddimY = phi::make_ddim({4, 16});
-  paddle::Tensor Y =
-      egr_utils_api::CreateTensorWithValue(ddimY,
+  paddle::Tensor Y = CreateTensorWithValue(ddimY,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -196,8 +195,7 @@ void test_matmul(bool is_remove_gradient_hook) {
  // 1. Prepare Input
  paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X =
-      egr_utils_api::CreateTensorWithValue(ddimX,
+  paddle::Tensor X = CreateTensorWithValue(ddimX,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -206,8 +204,7 @@ void test_matmul(bool is_remove_gradient_hook) {
  egr_utils_api::RetainGradForTensor(X);
  paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
-  paddle::Tensor Y =
-      egr_utils_api::CreateTensorWithValue(ddimY,
+  paddle::Tensor Y = CreateTensorWithValue(ddimY,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -259,8 +256,7 @@ void test_backward_final_hooks() {
  VLOG(6) << "Make paddle::Tensor";
  paddle::framework::DDim ddimX = phi::make_ddim({4, 16});
-  paddle::Tensor X =
-      egr_utils_api::CreateTensorWithValue(ddimX,
+  paddle::Tensor X = CreateTensorWithValue(ddimX,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
@@ -269,8 +265,7 @@ void test_backward_final_hooks() {
  paddle::framework::DDim ddimY = phi::make_ddim({16, 20});
  egr_utils_api::RetainGradForTensor(X);
-  paddle::Tensor Y =
-      egr_utils_api::CreateTensorWithValue(ddimY,
+  paddle::Tensor Y = CreateTensorWithValue(ddimY,
                                           paddle::platform::CPUPlace(),
                                           phi::DataType::FLOAT32,
                                           phi::DataLayout::NCHW,
...
...@@ -12,8 +12,6 @@ ...@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/fluid/eager/api/utils/tensor_utils.h"
#include <sstream> #include <sstream>
#include "gtest/gtest.h" #include "gtest/gtest.h"
...@@ -26,6 +24,8 @@ ...@@ -26,6 +24,8 @@
PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT); PD_DECLARE_KERNEL(full, CPU, ALL_LAYOUT);
using eager_test::CreateTensorWithValue;
namespace egr { namespace egr {
TEST(TensorUtils, Test) { TEST(TensorUtils, Test) {
...@@ -37,23 +37,21 @@ TEST(TensorUtils, Test) { ...@@ -37,23 +37,21 @@ TEST(TensorUtils, Test) {
paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32}); paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
// Create Target Tensor // Create Target Tensor
paddle::Tensor t = paddle::Tensor t = CreateTensorWithValue(ddim,
egr_utils_api::CreateTensorWithValue(ddim,
paddle::platform::CPUPlace(), paddle::platform::CPUPlace(),
phi::DataType::FLOAT32, phi::DataType::FLOAT32,
phi::DataLayout::NCHW, phi::DataLayout::NCHW,
5.0 /*value*/, 5.0 /*value*/,
true /*is_leaf*/); true /*is_leaf*/);
paddle::Tensor t_grad = paddle::Tensor t_grad = CreateTensorWithValue(ddim,
egr_utils_api::CreateTensorWithValue(ddim, paddle::platform::CPUPlace(),
paddle::platform::CPUPlace(), phi::DataType::FLOAT32,
phi::DataType::FLOAT32, phi::DataLayout::NCHW,
phi::DataLayout::NCHW, 1.0 /*value*/,
1.0 /*value*/, false /*is_leaf*/);
false /*is_leaf*/);
CHECK_EQ(egr_utils_api::IsLeafTensor(t), true); CHECK_EQ(EagerUtils::IsLeafTensor(t), true);
// Test Utils // Test Utils
eager_test::CompareTensorWithValue<float>(t, 5.0); eager_test::CompareTensorWithValue<float>(t, 5.0);
......
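Editor's note (illustrative sketch, not part of the diff): with the tensor_utils helpers gone, the leaf-tensor check in the hunk above goes through egr::EagerUtils directly instead of egr_utils_api. A minimal illustration, assuming only the header already used elsewhere in this commit:

    #include "paddle/fluid/eager/utils.h"

    // Returns true when the tensor is a leaf of the autograd graph, mirroring
    // the CHECK_EQ(EagerUtils::IsLeafTensor(t), true) call in the test above.
    bool IsLeafExample(const paddle::Tensor& t) {
      return egr::EagerUtils::IsLeafTensor(t);
    }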
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#pragma once #pragma once
#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/utils/global_utils.h" #include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/eager/autograd_meta.h" #include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/eager_tensor.h" #include "paddle/fluid/eager/eager_tensor.h"
...@@ -27,6 +28,26 @@ ...@@ -27,6 +28,26 @@
namespace eager_test { namespace eager_test {
inline paddle::Tensor CreateTensorWithValue(
const phi::DDim& ddim,
const paddle::platform::Place& place,
const phi::DataType& dtype,
const phi::DataLayout& layout,
float value,
bool is_leaf = true) {
paddle::Tensor out = paddle::experimental::full(
phi::vectorize(ddim), paddle::experimental::Scalar(value), dtype, place);
auto meta = egr::EagerUtils::autograd_meta(&out);
if (is_leaf) {
auto accumulation_node = std::make_shared<egr::GradNodeAccumulation>(meta);
meta->SetGradNode(accumulation_node);
meta->SetStopGradient(false);
}
return out;
}
template <typename T> template <typename T>
bool CompareGradTensorWithValue(const paddle::Tensor& target, T value) { bool CompareGradTensorWithValue(const paddle::Tensor& target, T value) {
egr::AutogradMeta* meta = egr::EagerUtils::unsafe_autograd_meta(target); egr::AutogradMeta* meta = egr::EagerUtils::unsafe_autograd_meta(target);
......
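Editor's note (illustrative sketch, not part of the diff): the hunk above moves CreateTensorWithValue out of egr_utils_api and into the eager_test test header as an inline helper. A minimal usage sketch under that assumption; the include path below is an assumption, not taken from the diff:

    #include "test_utils.h"  // assumed path of the eager_test helper header edited above
    #include "paddle/fluid/eager/api/utils/hook_utils.h"  // for egr_utils_api::RetainGradForTensor

    void CreateLeafTensorExample() {
      paddle::framework::DDim ddim = phi::make_ddim({2, 4});
      // Builds a FLOAT32 tensor filled with 1.0; is_leaf = true attaches a
      // GradNodeAccumulation node and clears stop_gradient, as the helper above does.
      paddle::Tensor t = eager_test::CreateTensorWithValue(ddim,
                                                           paddle::platform::CPUPlace(),
                                                           phi::DataType::FLOAT32,
                                                           phi::DataLayout::NCHW,
                                                           1.0 /*value*/,
                                                           true /*is_leaf*/);
      egr_utils_api::RetainGradForTensor(t);  // same pattern as the updated tests in this commit
    }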
...@@ -2,15 +2,17 @@ ...@@ -2,15 +2,17 @@
# in Linux, c_api test cant do like this or graph_to_program register more than once. # in Linux, c_api test cant do like this or graph_to_program register more than once.
# Both Windows and Linux can only use paddle_inference_c, but this will increase size # Both Windows and Linux can only use paddle_inference_c, but this will increase size
# of build folder by 30G. # of build folder by 30G.
set(inference_api_tester_deps paddle_inference_api analysis_config)
cc_test( cc_test(
test_paddle_inference_api test_paddle_inference_api
SRCS api_tester.cc SRCS api_tester.cc
DEPS paddle_inference_api) DEPS ${inference_api_tester_deps})
cc_test( cc_test(
inference_api_helper_test inference_api_helper_test
SRCS helper_test.cc SRCS helper_test.cc
DEPS paddle_inference_api) DEPS ${inference_api_tester_deps})
if(WITH_ONNXRUNTIME AND WIN32) if(WITH_ONNXRUNTIME AND WIN32)
# Copy onnxruntime for some c++ test in Windows, since the test will # Copy onnxruntime for some c++ test in Windows, since the test will
...@@ -1356,7 +1358,7 @@ if(WITH_TESTING AND WITH_INFERENCE_API_TEST) ...@@ -1356,7 +1358,7 @@ if(WITH_TESTING AND WITH_INFERENCE_API_TEST)
cc_test( cc_test(
paddle_infer_api_errors_test paddle_infer_api_errors_test
SRCS paddle_infer_api_errors_tester.cc SRCS paddle_infer_api_errors_tester.cc
DEPS paddle_inference_api) DEPS ${inference_api_tester_deps})
if(WITH_GPU AND TENSORRT_FOUND) if(WITH_GPU AND TENSORRT_FOUND)
set_tests_properties(trt_resnext_test PROPERTIES TIMEOUT 300) set_tests_properties(trt_resnext_test PROPERTIES TIMEOUT 300)
...@@ -1485,7 +1487,7 @@ if(WITH_TESTING AND WITH_INFERENCE_API_TEST) ...@@ -1485,7 +1487,7 @@ if(WITH_TESTING AND WITH_INFERENCE_API_TEST)
--infer_model=${RESNET50_MODEL_DIR}) --infer_model=${RESNET50_MODEL_DIR})
endif() endif()
set(inference_deps ${analysis_deps} paddle_inference_api analysis set(inference_deps ${analysis_deps} ${inference_api_tester_deps} analysis
naive_executor ${GLOB_PASS_LIB}) naive_executor ${GLOB_PASS_LIB})
if(WITH_TESTING) if(WITH_TESTING)
......
set(prim_eager_deps set(prim_eager_deps
phi phi
hook_utils hook_utils
tensor_utils
utils utils
global_utils global_utils
backward backward
......
...@@ -65,6 +65,8 @@ PD_DECLARE_KERNEL(bitwise_not, KPS, ALL_LAYOUT); ...@@ -65,6 +65,8 @@ PD_DECLARE_KERNEL(bitwise_not, KPS, ALL_LAYOUT);
#endif #endif
using eager_test::CreateTensorWithValue;
namespace paddle { namespace paddle {
namespace prim { namespace prim {
...@@ -75,21 +77,19 @@ TEST(EagerPrim, TanhBackwardTest) { ...@@ -75,21 +77,19 @@ TEST(EagerPrim, TanhBackwardTest) {
paddle::prim::InitTensorOperants(); paddle::prim::InitTensorOperants();
// 2. pre // 2. pre
paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32}); paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
paddle::Tensor tensor0 = paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
::egr::egr_utils_api::CreateTensorWithValue(ddim, paddle::platform::CPUPlace(),
paddle::platform::CPUPlace(), phi::DataType::FLOAT32,
phi::DataType::FLOAT32, phi::DataLayout::NCHW,
phi::DataLayout::NCHW, 5.0 /*value*/,
5.0 /*value*/, true /*is_leaf*/);
true /*is_leaf*/);
::egr::egr_utils_api::RetainGradForTensor(tensor0); ::egr::egr_utils_api::RetainGradForTensor(tensor0);
paddle::Tensor tensor1 = paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
::egr::egr_utils_api::CreateTensorWithValue(ddim, paddle::platform::CPUPlace(),
paddle::platform::CPUPlace(), phi::DataType::FLOAT32,
phi::DataType::FLOAT32, phi::DataLayout::NCHW,
phi::DataLayout::NCHW, 5.0 /*value*/,
5.0 /*value*/, true /*is_leaf*/);
true /*is_leaf*/);
::egr::egr_utils_api::RetainGradForTensor(tensor1); ::egr::egr_utils_api::RetainGradForTensor(tensor1);
// 3. Run Forward once // 3. Run Forward once
paddle::Tensor out0 = tanh_ad_func(tensor0); paddle::Tensor out0 = tanh_ad_func(tensor0);
...@@ -132,21 +132,19 @@ TEST(EagerPrim, LogicalOperantsTest) { ...@@ -132,21 +132,19 @@ TEST(EagerPrim, LogicalOperantsTest) {
paddle::prim::InitTensorOperants(); paddle::prim::InitTensorOperants();
// 2. pre // 2. pre
paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32}); paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
paddle::Tensor tensor0 = paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
::egr::egr_utils_api::CreateTensorWithValue(ddim, paddle::platform::CPUPlace(),
paddle::platform::CPUPlace(), phi::DataType::INT32,
phi::DataType::INT32, phi::DataLayout::NCHW,
phi::DataLayout::NCHW, 1 /*value*/,
1 /*value*/, true /*is_leaf*/);
true /*is_leaf*/);
::egr::egr_utils_api::RetainGradForTensor(tensor0); ::egr::egr_utils_api::RetainGradForTensor(tensor0);
paddle::Tensor tensor1 = paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
::egr::egr_utils_api::CreateTensorWithValue(ddim, paddle::platform::CPUPlace(),
paddle::platform::CPUPlace(), phi::DataType::INT32,
phi::DataType::INT32, phi::DataLayout::NCHW,
phi::DataLayout::NCHW, 0 /*value*/,
0 /*value*/, true /*is_leaf*/);
true /*is_leaf*/);
::egr::egr_utils_api::RetainGradForTensor(tensor1); ::egr::egr_utils_api::RetainGradForTensor(tensor1);
// 3. Run Forward once // 3. Run Forward once
paddle::Tensor out0 = tensor0 & tensor1; paddle::Tensor out0 = tensor0 & tensor1;
...@@ -170,21 +168,19 @@ TEST(EagerPrim, CompareOperantsTest) { ...@@ -170,21 +168,19 @@ TEST(EagerPrim, CompareOperantsTest) {
paddle::prim::InitTensorOperants(); paddle::prim::InitTensorOperants();
// 2. pre // 2. pre
paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32}); paddle::framework::DDim ddim = phi::make_ddim({4, 16, 16, 32});
paddle::Tensor tensor0 = paddle::Tensor tensor0 = CreateTensorWithValue(ddim,
::egr::egr_utils_api::CreateTensorWithValue(ddim, paddle::platform::CPUPlace(),
paddle::platform::CPUPlace(), phi::DataType::INT32,
phi::DataType::INT32, phi::DataLayout::NCHW,
phi::DataLayout::NCHW, 1 /*value*/,
1 /*value*/, true /*is_leaf*/);
true /*is_leaf*/);
::egr::egr_utils_api::RetainGradForTensor(tensor0); ::egr::egr_utils_api::RetainGradForTensor(tensor0);
paddle::Tensor tensor1 = paddle::Tensor tensor1 = CreateTensorWithValue(ddim,
::egr::egr_utils_api::CreateTensorWithValue(ddim, paddle::platform::CPUPlace(),
paddle::platform::CPUPlace(), phi::DataType::INT32,
phi::DataType::INT32, phi::DataLayout::NCHW,
phi::DataLayout::NCHW, 0 /*value*/,
0 /*value*/, true /*is_leaf*/);
true /*is_leaf*/);
::egr::egr_utils_api::RetainGradForTensor(tensor1); ::egr::egr_utils_api::RetainGradForTensor(tensor1);
// 3. Run Forward once // 3. Run Forward once
paddle::Tensor out0 = (tensor0 < tensor1); paddle::Tensor out0 = (tensor0 < tensor1);
......