Commit 8305c2be (unverified): support eager switch system (#38170)
Repository: BaiXuePrincess/Paddle, forked from PaddlePaddle/Paddle
Authored by Jiabin Yang on Dec 16, 2021; committed via GitHub on Dec 16, 2021.
Parent commit: 092839d6

* support eager switch system
* polish code
19 changed files, with 220 additions and 163 deletions (+220 −163).
Changed files:

  paddle/fluid/eager/api/utils/CMakeLists.txt                        +1  −1
  paddle/fluid/eager/api/utils/global_utils.h                        +32 −12
  paddle/fluid/eager/legacy/amp_auto_cast.cc                         +1  −1
  paddle/fluid/eager/legacy/amp_auto_cast.h                          +3  −10
  paddle/fluid/eager/legacy/op_runner.cc                             +2  −2
  paddle/fluid/pybind/CMakeLists.txt                                 +1  −1
  paddle/fluid/pybind/eager.cc                                       +4  −5
  paddle/fluid/pybind/eager_functions.cc                             +15 −5
  paddle/fluid/pybind/eager_method.cc                                +11 −15
  paddle/fluid/pybind/eager_properties.cc                            +22 −33
  paddle/fluid/pybind/eager_utils.cc                                 +33 −10
  paddle/fluid/pybind/eager_utils.h                                  +4  −2
  paddle/fluid/pybind/exception.h                                    +6  −0
  paddle/fluid/pybind/imperative.cc                                  +12 −2
  python/paddle/fluid/core.py                                        +6  −0
  python/paddle/fluid/framework.py                                   +16 −38
  python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py  +5  −5
  python/paddle/fluid/tests/unittests/test_egr_python_api.py         +44 −19
  python/paddle/tensor/creation.py                                   +2  −2
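In short, this commit replaces the experimental eager_guard/in_eager_mode API with a process-wide eager switch: egr::Controller gains an in_eager_mode_ flag and shares the imperative Tracer, pybind exposes _enable_eager_mode/_disable_eager_mode/_in_eager_mode, and framework.py gains the test-only _test_eager_guard. A minimal sketch of the new switch (illustrative only; assumes a Paddle build containing this commit):

    import paddle.fluid.core as core
    from paddle.fluid.framework import _test_eager_guard

    # Explicit toggle ...
    core._enable_eager_mode()
    assert core._in_eager_mode()
    core._disable_eager_mode()

    # ... or scoped, the way the updated unit tests do it:
    with _test_eager_guard():
        assert core._in_eager_mode()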
paddle/fluid/eager/api/utils/CMakeLists.txt

 cc_library(tensor_utils SRCS tensor_utils.cc DEPS pten pten_api autograd_meta grad_node_info accumulation_node)
 cc_library(hook_utils SRCS hook_utils.cc DEPS pten tensor_utils autograd_meta grad_node_info utils accumulation_node)
-cc_library(global_utils SRCS global_utils.cc DEPS place)
+cc_library(global_utils SRCS global_utils.cc DEPS place tracer)
paddle/fluid/eager/api/utils/global_utils.h

@@ -17,7 +17,7 @@
 #include <atomic>
 #include <memory>
-#include "paddle/fluid/platform/place.h"
+#include "paddle/fluid/imperative/tracer.h"

 namespace egr {

@@ -34,29 +34,49 @@ class UniqueNameGenerator {
 };

 // Global
 // TODO(jiabin): Now we are using imperative tracer, move it here when we
 // deprecate imperative.
 class Controller {
  public:
   static Controller& Instance() { return *controller_; }
-  const paddle::platform::Place& GetExpectedPlace() const {
-    return *expected_place_.get();
+  paddle::platform::Place GetExpectedPlace() const {
+    return tracer_->ExpectedPlace();
   }
   void SetExpectedPlace(const paddle::platform::Place& place) {
-    expected_place_ = std::make_shared<paddle::platform::Place>(place);
+    tracer_->SetExpectedPlace(place);
   }
-  void SetAMPLevel(int level) { amp_level_ = level; }
-  int GetAMPLevel() const { return amp_level_; }
-  bool HasGrad() const { return has_grad_; }
+  void SetAMPLevel(paddle::imperative::AmpLevel level) {
+    tracer_->SetAmpLevel(level);
+  }
+  paddle::imperative::AmpLevel GetAMPLevel() const {
+    return tracer_->GetAmpLevel();
+  }
+  bool HasGrad() const { return tracer_->HasGrad(); }
+  void SetHasGrad(bool has_grad) { tracer_->SetHasGrad(has_grad); }
   std::string GenerateUniqueName(std::string key = "eager_tmp") {
-    return generator_->Generate(key);
+    return tracer_->GenerateUniqueName(key);
   }
+  const std::shared_ptr<paddle::imperative::Tracer>& GetCurrentTracer() {
+    return tracer_;
+  }
+  void SetCurrentTracer(
+      const std::shared_ptr<paddle::imperative::Tracer>& tracer) {
+    tracer_ = tracer;
+    VLOG(6) << "Set current tracer: " << tracer_;
+  }
+  bool InEagerMode() const { return in_eager_mode_; }
+  void SetInEagerMode(bool in_eager_mode) { in_eager_mode_ = in_eager_mode; }

  private:
   Controller() = default;
   static Controller* controller_;
-  std::shared_ptr<paddle::platform::Place> expected_place_ = nullptr;
-  int amp_level_ = 0;
-  bool has_grad_ = true;
-  std::unique_ptr<UniqueNameGenerator> generator_{new UniqueNameGenerator()};
+  std::shared_ptr<paddle::imperative::Tracer> tracer_{
+      new paddle::imperative::Tracer()};
+  // TODO(jiabin): remove when we don't need imperative.
+  bool in_eager_mode_{false};
   DISABLE_COPY_AND_ASSIGN(Controller);
 };
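Controller no longer keeps its own place/AMP/grad state; every accessor above delegates to the shared paddle::imperative::Tracer, so the legacy dygraph mode and the new eager mode read one source of truth. A minimal round-trip from Python, using the bindings added later in this commit (a sketch, assuming this build):

    import paddle.fluid.core as core

    # The expected place set through the eager API is stored on the shared
    # Tracer held by Controller, and reads back through the same path.
    core._enable_eager_mode()
    core.eager._set_expected_place(core.CPUPlace())
    assert core.eager._get_expected_place().is_cpu_place()
    core._disable_eager_mode()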
paddle/fluid/eager/legacy/amp_auto_cast.cc

@@ -117,7 +117,7 @@ static inline std::shared_ptr<egr::EagerTensor> CastToType(
   NameTensorMap outs = {{"Out", {out}}};

   {
-    AutoCastGuard guard(0);
+    AutoCastGuard guard(paddle::imperative::AmpLevel::O0);
     paddle::framework::AttributeMap default_attrs;
     RunOp("cast", ins, outs, std::move(attrs), {}, &default_attrs, true);
   }
paddle/fluid/eager/legacy/amp_auto_cast.h

@@ -22,18 +22,11 @@
 #include "paddle/fluid/eager/api/utils/global_utils.h"
 #include "paddle/fluid/eager/eager_tensor.h"
 #include "paddle/fluid/eager/legacy/type_def.h"
+#include "paddle/fluid/imperative/amp_auto_cast.h"

 namespace egr {
 namespace legacy {

-// NOTE(zhiqiu): only O1 and O2 are valid now
-enum class AmpLevel {
-  O0 = 0,  // fp32
-  O1,      // amp, mixed fp32-fp16
-  O2,      // almost fp16
-  O3,      // fp16
-};
-
 class AmpOperators {
  public:
   ~AmpOperators();

@@ -69,7 +62,7 @@ std::ostream& operator<<(std::ostream& os, AmpOperators& ops);
 // NOTE(zhiqiu): AutoCastGuard is used for RAII.
 class AutoCastGuard {
  public:
-  explicit AutoCastGuard(int guard_level) {
+  explicit AutoCastGuard(paddle::imperative::AmpLevel guard_level) {
     pre_amp_level_ = Controller::Instance().GetAMPLevel();
     if (pre_amp_level_ != guard_level) {

@@ -84,7 +77,7 @@ class AutoCastGuard {
   AutoCastGuard& operator=(const AutoCastGuard& guard) = delete;

  private:
-  int pre_amp_level_;
+  paddle::imperative::AmpLevel pre_amp_level_;
 };

 NameTensorMap AutoCastInputs(const std::string& op_type,
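The eager-local AmpLevel enum is deleted; AutoCastGuard now takes the shared paddle::imperative::AmpLevel and saves/restores it through Controller. The same save-on-enter, restore-on-exit pattern, written as a Python context manager purely for illustration (not Paddle API; _amp_level is a hypothetical stand-in for the tracer's AMP state):

    from contextlib import contextmanager

    _amp_level = "O0"  # hypothetical stand-in for Controller/Tracer state

    @contextmanager
    def auto_cast_guard(level):
        # Mirror of the C++ RAII guard: remember the previous level,
        # switch to the requested one, restore on scope exit.
        global _amp_level
        prev = _amp_level
        _amp_level = level
        try:
            yield
        finally:
            _amp_level = prev

    with auto_cast_guard("O1"):
        assert _amp_level == "O1"
    assert _amp_level == "O0"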
paddle/fluid/eager/legacy/op_runner.cc

@@ -131,10 +131,10 @@ void RunOp(const std::string& type, const NameTensorMap& ins,
   auto amp_level = egr::Controller::Instance().GetAMPLevel();

   NameTensorMap new_ins = ins;
-  if (amp_level == 1) {
+  if (amp_level == paddle::imperative::AmpLevel::O1) {
     VLOG(5) << "Auto mixed precision run operator: " << type;
     new_ins = AutoCastInputs(type, ins);
-  } else if (amp_level == 2) {
+  } else if (amp_level == paddle::imperative::AmpLevel::O2) {
     VLOG(5) << "Pure fp16 run operator: " << type;
     new_ins = CastPureFp16Inputs(type, ins);
   }
paddle/fluid/pybind/CMakeLists.txt

@@ -2,7 +2,7 @@ set(PYBIND_DEPS pybind python proto_desc memory executor fleet_wrapper box_wrapp
     feed_fetch_method pass generate_pass pass_builder parallel_executor profiler layer tracer engine scope_pool
     analysis_predictor imperative_profiler imperative_flag save_load_util dlpack_tensor device_context
     gloo_wrapper infer_io_utils heter_wrapper generator op_version_registry ps_gpu_wrapper custom_operator
-    cost_model cuda_graph_with_memory_pool fleet_executor)
+    cost_model cuda_graph_with_memory_pool fleet_executor global_utils)

 if (WITH_PSCORE)
   set(PYBIND_DEPS ${PYBIND_DEPS} ps_service)
paddle/fluid/pybind/eager.cc

@@ -37,18 +37,17 @@ namespace py = ::pybind11;
 PyTypeObject* p_eager_tensor_type;

-PyObject* eagertensor_new(PyTypeObject* type, PyObject* args,
-                          PyObject* kwargs) {
+PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
   PyObject* obj = type->tp_alloc(type, 0);
   if (obj) {
     auto v = reinterpret_cast<EagerTensorObject*>(obj);
-    new (&(v->eagertensor)) egr::EagerTensor();
+    new (&(v->eager_tensor)) egr::EagerTensor();
   }
   return obj;
 }

 static void eagertensor_dealloc(EagerTensorObject* self) {
-  self->eagertensor.~EagerTensor();
+  self->eager_tensor.~EagerTensor();
   Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
 }

@@ -94,7 +93,7 @@ PyTypeObject eager_tensor_type = {
     0,               /* tp_dictoffset */
     0,               /* tp_init */
     0,               /* tp_alloc */
-    eagertensor_new, /* tp_new */
+    EagerTensorNew,  /* tp_new */
     0,               /* tp_free */
     0,               /* tp_is_gc */
     0,               /* tp_bases */
paddle/fluid/pybind/eager_functions.cc

@@ -90,13 +90,20 @@ static PyObject* eager_api_set_expected_place(PyObject* self, PyObject* args,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }

+static PyObject* eager_api_get_expected_place(PyObject* self, PyObject* args,
+                                              PyObject* kwargs) {
+  EAGER_TRY
+  return ToPyObject(egr::Controller::Instance().GetExpectedPlace());
+  EAGER_CATCH_AND_THROW_RETURN_NULL
+}
+
 static PyObject* eager_api_scale(PyObject* self, PyObject* args,
                                  PyObject* kwargs) {
   EAGER_TRY
   // TODO(jiabin): Sync Tensor and Variable here when we support
   egr::EagerTensor ret = egr::scale(
-      reinterpret_cast<EagerTensorObject*>(PyTuple_GET_ITEM(args, 0))
-          ->eagertensor,
+      reinterpret_cast<EagerTensorObject*>(PyTuple_GET_ITEM(args, 0))
+          ->eager_tensor,
       CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 1), 1),
       CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 2), 2),
       CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3),

@@ -128,10 +135,10 @@ static PyObject* eager_api_numpy_to_tensor(PyObject* numpy_data,
   PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
   if (obj) {
     auto v = reinterpret_cast<EagerTensorObject*>(obj);
-    new (&(v->eagertensor)) egr::EagerTensor();
-    v->eagertensor.set_impl(densetensor);
-    v->eagertensor.set_name(egr::Controller::Instance().GenerateUniqueName());
-    auto meta = egr::EagerUtils::autograd_meta(&(v->eagertensor));
+    new (&(v->eager_tensor)) egr::EagerTensor();
+    v->eager_tensor.set_impl(densetensor);
+    v->eager_tensor.set_name(egr::Controller::Instance().GenerateUniqueName());
+    auto meta = egr::EagerUtils::autograd_meta(&(v->eager_tensor));
     meta->SetStopGradient(stop_gradient);

     // Created tensor will be leaf tensor

@@ -204,6 +211,9 @@ PyMethodDef variable_functions[] = {
     {"_set_expected_place",
      (PyCFunction)(void (*)(void))eager_api_set_expected_place,
      METH_VARARGS | METH_KEYWORDS, NULL},
+    {"_get_expected_place",
+     (PyCFunction)(void (*)(void))eager_api_get_expected_place,
+     METH_VARARGS | METH_KEYWORDS, NULL},
     {"retain_grad_for_tensor",
      (PyCFunction)(void (*)(void))eager_api_retain_grad_for_tensor,
      METH_VARARGS | METH_KEYWORDS, NULL},
paddle/fluid/pybind/eager_method.cc

@@ -36,15 +36,14 @@ extern PyTypeObject* pEagerTensorType;
 static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
                                            PyObject* args, PyObject* kwargs) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  if (!self->eagertensor.initialized()) {
+  EAGER_SYNC_TRY
+  if (!self->eager_tensor.initialized()) {
     Py_INCREF(Py_None);
     return Py_None;
   }
-  auto tensor_dims = self->eagertensor.shape();
-  auto numpy_dtype = TensorDtype2NumpyDtype(self->eagertensor.type());
-  auto sizeof_dtype = pten::DataTypeSize(self->eagertensor.type());
+  auto tensor_dims = self->eager_tensor.shape();
+  auto numpy_dtype = TensorDtype2NumpyDtype(self->eager_tensor.type());
+  auto sizeof_dtype = pten::DataTypeSize(self->eager_tensor.type());
   Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
   Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
   size_t numel = 1;

@@ -61,18 +60,18 @@ static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
       pybind11::detail::npy_api::NPY_ARRAY_WRITEABLE_, nullptr);

-  if (self->eagertensor.is_cpu()) {
+  if (self->eager_tensor.is_cpu()) {
     auto dense_tensor =
-        std::dynamic_pointer_cast<pten::DenseTensor>(self->eagertensor.impl());
+        std::dynamic_pointer_cast<pten::DenseTensor>(self->eager_tensor.impl());
     platform::CPUPlace place;
     // deep copy
     paddle::memory::Copy(
         place,
         reinterpret_cast<void*>(pybind11::detail::array_proxy(array)->data),
         place, dense_tensor->data(), sizeof_dtype * numel);
 #if defined(PADDLE_WITH_CUDA)
-  } else if (self->eagertensor.is_cuda()) {
+  } else if (self->eager_tensor.is_cuda()) {
     auto dense_tensor =
-        std::dynamic_pointer_cast<pten::DenseTensor>(self->eagertensor.impl());
+        std::dynamic_pointer_cast<pten::DenseTensor>(self->eager_tensor.impl());
     paddle::platform::GpuMemcpySync(
         pybind11::detail::array_proxy(array)->data, dense_tensor->data(),

@@ -93,11 +92,8 @@ static PyObject* eager_tensor_method_numpy(EagerTensorObject* self,
 static PyObject* eager_tensor_method_is_initialized(EagerTensorObject* self,
                                                     PyObject* args,
                                                     PyObject* kwargs) {
-  EAGER_TRY
-  if (self->eagertensor.Var().IsInitialized()) {
-    self->eagertensor.SyncToTensor();
-  }
-  return ToPyObject(self->eagertensor.initialized());
+  EAGER_SYNC_TRY
+  return ToPyObject(self->eager_tensor.initialized());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
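With EAGER_SYNC_TRY, the Var-to-Tensor sync happens in the macro prologue, and only when the underlying Variable is initialized, instead of unconditionally in every method body. The numpy() path above still returns a deep copy; a sketch mirroring the unit tests in this commit (assumes this build):

    import numpy as np
    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        arr = np.ones([4, 16, 16, 32]).astype('float32')
        t = paddle.to_tensor(arr)
        # numpy() deep-copies the tensor buffer into a fresh ndarray
        assert np.array_equal(t.numpy(), arr)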
paddle/fluid/pybind/eager_properties.cc

@@ -36,44 +36,39 @@ extern PyTypeObject* p_eager_tensor_type;
 PyObject* eager_tensor_properties_get_name(EagerTensorObject* self,
                                            void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  return ToPyObject(self->eagertensor.name());
+  EAGER_SYNC_TRY
+  return ToPyObject(self->eager_tensor.name());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }

 int eager_tensor_properties_set_name(EagerTensorObject* self, PyObject* value,
                                      void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  self->eagertensor.set_name(CastPyArg2AttrString(value, 0));
+  EAGER_SYNC_TRY
+  self->eager_tensor.set_name(CastPyArg2AttrString(value, 0));
   return 0;
   EAGER_CATCH_AND_THROW_RETURN_ZERO
 }

 PyObject* eager_tensor_properties_get_stop_gradient(EagerTensorObject* self,
                                                     void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  auto meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
+  EAGER_SYNC_TRY
+  auto meta = egr::EagerUtils::autograd_meta(&self->eager_tensor);
   return ToPyObject(meta->StopGradient());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }

 PyObject* eager_tensor_properties_get_grad(EagerTensorObject* self,
                                            void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  auto meta = egr::EagerUtils::unsafe_autograd_meta(self->eagertensor);
+  EAGER_SYNC_TRY
+  auto meta = egr::EagerUtils::unsafe_autograd_meta(self->eager_tensor);
   return ToPyObject(meta->Grad());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }

 int eager_tensor_properties_set_stop_gradient(EagerTensorObject* self,
                                               PyObject* value, void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  auto meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
+  EAGER_SYNC_TRY
+  auto meta = egr::EagerUtils::autograd_meta(&self->eager_tensor);
   meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0));
   return 0;
   EAGER_CATCH_AND_THROW_RETURN_ZERO

@@ -81,18 +76,16 @@ int eager_tensor_properties_set_stop_gradient(EagerTensorObject* self,
 PyObject* eager_tensor_properties_get_persistable(EagerTensorObject* self,
                                                   void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  auto meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
+  EAGER_SYNC_TRY
+  auto meta = egr::EagerUtils::autograd_meta(&self->eager_tensor);
   return ToPyObject(meta->Persistable());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }

 int eager_tensor_properties_set_persistable(EagerTensorObject* self,
                                             PyObject* value, void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  auto meta = egr::EagerUtils::autograd_meta(&self->eagertensor);
+  EAGER_SYNC_TRY
+  auto meta = egr::EagerUtils::autograd_meta(&self->eager_tensor);
   meta->SetPersistable(CastPyArg2AttrBoolean(value, 0));
   return 0;
   EAGER_CATCH_AND_THROW_RETURN_ZERO

@@ -100,9 +93,8 @@ int eager_tensor_properties_set_persistable(EagerTensorObject* self,
 PyObject* eager_tensor_properties_get_shape(EagerTensorObject* self,
                                             void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  auto ddim = self->eagertensor.shape();
+  EAGER_SYNC_TRY
+  auto ddim = self->eager_tensor.shape();
   std::vector<int64_t> value;
   size_t rank = static_cast<size_t>(ddim.size());
   value.resize(rank);

@@ -116,27 +108,24 @@ PyObject* eager_tensor_properties_get_shape(EagerTensorObject* self,
 PyObject* eager_tensor_properties_get_place(EagerTensorObject* self,
                                             void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  return ToPyObject(self->eagertensor.place());
+  EAGER_SYNC_TRY
+  return ToPyObject(self->eager_tensor.place());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }

 PyObject* eager_tensor_properties_get_place_str(EagerTensorObject* self,
                                                 void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
+  EAGER_SYNC_TRY
   std::stringstream ostr;
-  ostr << self->eagertensor.place();
+  ostr << self->eager_tensor.place();
   return ToPyObject(ostr.str());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }

 PyObject* eager_tensor_properties_get_dtype(EagerTensorObject* self,
                                             void* closure) {
-  EAGER_TRY
-  self->eagertensor.SyncToTensor();
-  return ToPyObject(pten::DataType2String(self->eagertensor.type()));
+  EAGER_SYNC_TRY
+  return ToPyObject(pten::TransToProtoVarType(self->eager_tensor.type()));
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
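Every property accessor above now shares the EAGER_SYNC_TRY prologue and reads the renamed eager_tensor field, and dtype is returned as a framework proto VarType rather than a string. Exercised from Python (a sketch mirroring test_properties in this commit):

    import numpy as np
    import paddle
    import paddle.fluid.core as core
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        t = paddle.to_tensor(np.ones([4, 16, 16, 32]).astype('float32'))
        t.name = 'tensor_name_test'
        assert t.name == 'tensor_name_test'
        assert t.shape == [4, 16, 16, 32]
        # dtype now compares against the proto enum, not the string 'float32'
        assert t.dtype == core.VarDesc.VarType.FP32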
paddle/fluid/pybind/eager_utils.cc

@@ -33,6 +33,7 @@ namespace pybind {
 extern PyTypeObject* p_eager_tensor_type;

+extern PyTypeObject* g_vartype_pytype;
 extern PyTypeObject* g_place_pytype;
 extern PyTypeObject* g_cudaplace_pytype;
 extern PyTypeObject* g_cpuplace_pytype;

@@ -174,7 +175,7 @@ std::string CastPyArg2AttrString(PyObject* obj, ssize_t arg_pos) {
 egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos) {
   if (PyObject_IsInstance(obj,
                           reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
-    return reinterpret_cast<EagerTensorObject*>(obj)->eagertensor;
+    return reinterpret_cast<EagerTensorObject*>(obj)->eager_tensor;
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "argument (position %d) must be "

@@ -194,7 +195,7 @@ std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
     if (PyObject_IsInstance(
             item, reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
       result.emplace_back(
-          reinterpret_cast<EagerTensorObject*>(item)->eagertensor);
+          reinterpret_cast<EagerTensorObject*>(item)->eager_tensor);
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "argument (position %d) must be "

@@ -211,7 +212,7 @@ std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
     if (PyObject_IsInstance(
             item, reinterpret_cast<PyObject*>(p_eager_tensor_type))) {
       result.emplace_back(
-          reinterpret_cast<EagerTensorObject*>(item)->eagertensor);
+          reinterpret_cast<EagerTensorObject*>(item)->eager_tensor);
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "argument (position %d) must be "

@@ -258,6 +259,22 @@ platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos) {
   return place;
 }

+paddle::framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
+                                                            ssize_t arg_pos) {
+  paddle::framework::proto::VarType::Type dtype;
+  if (PyObject_IsInstance(obj,
+                          reinterpret_cast<PyObject*>(g_vartype_pytype))) {
+    dtype =
+        ::pybind11::handle(obj).cast<paddle::framework::proto::VarType::Type>();
+  } else {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "argument (position %d) must be "
+        "one of core.VarDesc.VarType, "
+        "but got %s",
+        arg_pos + 1,
+        reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
+  }
+  return dtype;
+}
+
 PyObject* ToPyObject(bool value) {
   if (value) {
     Py_INCREF(Py_True);

@@ -286,8 +303,8 @@ PyObject* ToPyObject(const egr::EagerTensor& value) {
   PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
   if (obj) {
     auto v = reinterpret_cast<EagerTensorObject*>(obj);
-    new (&(v->eagertensor)) egr::EagerTensor();
-    v->eagertensor = value;
+    new (&(v->eager_tensor)) egr::EagerTensor();
+    v->eager_tensor = value;
   } else {
     PADDLE_THROW(platform::errors::Fatal(
         "tp_alloc return null, can not new a PyObject."));

@@ -352,8 +369,8 @@ PyObject* ToPyObject(const std::vector<egr::EagerTensor>& value) {
   PyObject* obj = p_eager_tensor_type->tp_alloc(p_eager_tensor_type, 0);
   if (obj) {
     auto v = reinterpret_cast<EagerTensorObject*>(obj);
-    new (&(v->eagertensor)) egr::EagerTensor();
-    v->eagertensor = value[i];
+    new (&(v->eager_tensor)) egr::EagerTensor();
+    v->eager_tensor = value[i];
   } else {
     PADDLE_THROW(platform::errors::Fatal(
         "tp_alloc return null, can not new a PyObject."));

@@ -370,6 +387,12 @@ PyObject* ToPyObject(const platform::Place& value) {
   return obj.ptr();
 }

+PyObject* ToPyObject(const paddle::framework::proto::VarType::Type& dtype) {
+  auto obj = ::pybind11::cast(dtype);
+  obj.inc_ref();
+  return obj.ptr();
+}
+
 PyObject* ToPyObject(const void* value) {
   if (value == nullptr) {
     Py_INCREF(Py_None);

@@ -399,7 +422,7 @@ egr::EagerTensor GetEagerTensorFromArgs(const std::string& op_type,
     return emptytensor;
   }

-  return reinterpret_cast<EagerTensorObject*>(obj)->eagertensor;
+  return reinterpret_cast<EagerTensorObject*>(obj)->eager_tensor;
 }

 std::vector<egr::EagerTensor> GetEagerTensorListFromArgs(

@@ -430,7 +453,7 @@ std::vector<egr::EagerTensor> GetEagerTensorListFromArgs(
     for (Py_ssize_t i = 0; i < len; i++) {
       result.emplace_back(
           reinterpret_cast<EagerTensorObject*>(PyList_GetItem(list, i))
-              ->eagertensor);
+              ->eager_tensor);
     }
   } else if (PyTuple_Check(list)) {
     Py_ssize_t len = PyTuple_Size(list);

@@ -443,7 +466,7 @@ std::vector<egr::EagerTensor> GetEagerTensorListFromArgs(
     for (Py_ssize_t i = 0; i < len; i++) {
       result.emplace_back(
           reinterpret_cast<EagerTensorObject*>(PyTuple_GetItem(list, i))
-              ->eagertensor);
+              ->eager_tensor);
     }
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
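CastPyArg2ProtoType and the matching ToPyObject overload let dtypes cross the Python/C++ boundary as core.VarDesc.VarType values in both directions. From Python (a sketch mirroring the updated dtype tests):

    import numpy as np
    import paddle
    import paddle.fluid.core as core
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        # pass a proto VarType in ...
        t = paddle.to_tensor(np.ones([2, 2]).astype('float32'),
                             core.VarDesc.VarType.FP32)
        # ... and get a proto VarType back
        assert t.dtype == core.VarDesc.VarType.FP32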
paddle/fluid/pybind/eager_utils.h

@@ -18,7 +18,7 @@ namespace paddle {
 namespace pybind {

 typedef struct {
-  PyObject_HEAD egr::EagerTensor eagertensor;
+  PyObject_HEAD egr::EagerTensor eager_tensor;
 } EagerTensorObject;

 int TensorDtype2NumpyDtype(pten::DataType dtype);

@@ -35,7 +35,8 @@ egr::EagerTensor CastPyArg2EagerTensor(PyObject* obj, ssize_t arg_pos);
 std::vector<egr::EagerTensor> CastPyArg2VectorOfEagerTensor(PyObject* obj,
                                                             ssize_t arg_pos);
 platform::Place CastPyArg2Place(PyObject* obj, ssize_t arg_pos);
+framework::proto::VarType::Type CastPyArg2ProtoType(PyObject* obj,
+                                                    ssize_t arg_pos);

 PyObject* ToPyObject(int value);
 PyObject* ToPyObject(bool value);
 PyObject* ToPyObject(int64_t value);

@@ -51,6 +52,7 @@ PyObject* ToPyObject(const std::vector<float>& value);
 PyObject* ToPyObject(const std::vector<double>& value);
 PyObject* ToPyObject(const std::vector<egr::EagerTensor>& value);
 PyObject* ToPyObject(const platform::Place& value);
+PyObject* ToPyObject(const paddle::framework::proto::VarType::Type& dtype);
 PyObject* ToPyObject(const void* value);

 template <typename Tuple, size_t N>
paddle/fluid/pybind/exception.h

@@ -19,6 +19,12 @@ limitations under the License. */
 #include "pybind11/pybind11.h"

 #define EAGER_TRY try {
+#define EAGER_SYNC_TRY                              \
+  try {                                             \
+    if (self->eager_tensor.Var().IsInitialized()) { \
+      self->eager_tensor.SyncToTensor();            \
+    }
 #define EAGER_CATCH_AND_THROW_RETURN_NULL \
   } \
   catch (...) { \
paddle/fluid/pybind/imperative.cc

@@ -29,6 +29,7 @@ limitations under the License. */
 #include <utility>
 #include <vector>

+#include "paddle/fluid/eager/api/all.h"
 #include "paddle/fluid/framework/scope_guard.h"
 #include "paddle/fluid/imperative/all_reduce.h"
 #include "paddle/fluid/imperative/amp_auto_cast.h"

@@ -868,9 +869,18 @@ void BindImperative(py::module *m_ptr) {
   m.def("_dygraph_debug_level", []() { return imperative::GetDebugLevel(); });
   m.def("_switch_tracer",
         [](const std::shared_ptr<imperative::Tracer> &tracer) {
-          imperative::SetCurrentTracer(tracer);
+          if (egr::Controller::Instance().InEagerMode()) {
+            egr::Controller::Instance().SetCurrentTracer(tracer);
+          } else {
+            imperative::SetCurrentTracer(tracer);
+          }
         });
+  m.def("_enable_eager_mode",
+        []() { egr::Controller::Instance().SetInEagerMode(true); });
+  m.def("_disable_eager_mode",
+        []() { egr::Controller::Instance().SetInEagerMode(false); });
+  m.def("_in_eager_mode",
+        []() { return egr::Controller::Instance().InEagerMode(); });
   py::class_<imperative::VarBase, std::shared_ptr<imperative::VarBase>> varbase(
       m, "VarBase", R"DOC()DOC");
   g_varbase_pytype = (PyTypeObject *)varbase.ptr();  // NOLINT
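_switch_tracer now routes through the eager Controller whenever the global switch is on, so both runtimes share one current tracer. A sketch of the three new bindings (assumes core.Tracer is the pybind-exposed imperative::Tracer, as used elsewhere in the dygraph code):

    import paddle.fluid.core as core

    assert not core._in_eager_mode()
    core._enable_eager_mode()
    assert core._in_eager_mode()

    tracer = core.Tracer()
    core._switch_tracer(tracer)  # lands in egr::Controller::SetCurrentTracer
    core._disable_eager_mode()
    core._switch_tracer(tracer)  # falls back to imperative::SetCurrentTracer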
python/paddle/fluid/core.py

@@ -268,6 +268,9 @@ if avx_supported():
         from .core_avx import _is_dygraph_debug_enabled
         from .core_avx import _dygraph_debug_level
         from .core_avx import _switch_tracer
+        from .core_avx import _disable_eager_mode
+        from .core_avx import _enable_eager_mode
+        from .core_avx import _in_eager_mode
         from .core_avx import _set_paddle_lib_path
         from .core_avx import _create_loaded_parameter
         from .core_avx import _cuda_synchronize

@@ -321,6 +324,9 @@ if load_noavx:
         from .core_noavx import _is_dygraph_debug_enabled
         from .core_noavx import _dygraph_debug_level
         from .core_noavx import _switch_tracer
+        from .core_noavx import _disable_eager_mode
+        from .core_noavx import _enable_eager_mode
+        from .core_noavx import _in_eager_mode
         from .core_noavx import _set_paddle_lib_path
         from .core_noavx import _create_loaded_parameter
         from .core_noavx import _cuda_synchronize
python/paddle/fluid/framework.py

@@ -46,8 +46,6 @@ __all__ = [
     'Program',
     'default_startup_program',
     'default_main_program',
-    'eager_guard',
-    'in_eager_mode',
     'program_guard',
     'name_scope',
     'cuda_places',

@@ -79,46 +77,20 @@ _current_device = None
 global_prog_seed = 0
 _current_pipeline_stage = None
 _global_flags_ = core.globals()
-_eager_mode_ = False
-
-
-@signature_safe_contextmanager
-def eager_mode_place_guard(place):
-    if place is not None:
-        expected_place = _get_paddle_place(place)
-    else:
-        expected_place = _current_expected_place()
-
-    global _global_expected_place_
-    tmp_place = _global_expected_place_
-    _global_expected_place_ = expected_place
-
-    _set_expected_place(expected_place)
-    try:
-        yield
-    finally:
-        _global_expected_place_ = tmp_place
-        _set_expected_place(tmp_place)
+core._disable_eager_mode()


 @signature_safe_contextmanager
-def eager_guard(place=None):
-    global _eager_mode_
-    _eager_mode_ = True
+def _test_eager_guard():
+    core._enable_eager_mode()
     _C_ops.switch_to_eager_ops()
     try:
-        with eager_mode_place_guard(place):
-            yield
+        yield
     finally:
-        _eager_mode_ = False
+        core._disable_eager_mode()
         _C_ops.switch_to_core_ops()


-def in_eager_mode():
-    return _eager_mode_
-
-
 def require_version(min_version, max_version=None):
     """
     Check if the installed version of PaddlePaddle is in [min_version, max_version],

@@ -256,6 +228,10 @@ def in_dygraph_mode():
     return _dygraph_tracer_ is not None


+def _in_eager_mode():
+    return core._in_eager_mode() and in_dygraph_mode()
+
+
 def _dygraph_not_support_(func):
     def __impl__(*args, **kwargs):
         assert not in_dygraph_mode(

@@ -382,10 +358,9 @@ def _set_dygraph_tracer_expected_place(place):
 def _set_expected_place(place):
     global _global_expected_place_
     _global_expected_place_ = place
-    if in_eager_mode():
+    if _in_eager_mode():
         return core.eager._set_expected_place(place)
-    else:
-        _set_dygraph_tracer_expected_place(place)
+    _set_dygraph_tracer_expected_place(place)


 # TODO(zhiqiu): remove this function.

@@ -6441,14 +6416,17 @@ def _dygraph_place_guard(place):
     global _global_expected_place_
     tmp_place = _global_expected_place_
     _global_expected_place_ = place
+    if _in_eager_mode():
+        core.eager._set_expected_place(place)
     _set_dygraph_tracer_expected_place(place)

     try:
         yield
     finally:
         _global_expected_place_ = tmp_place
-        _set_dygraph_tracer_expected_place(tmp_place)
+        if _in_eager_mode():
+            core.eager._set_expected_place(_global_expected_place_)
+        _set_dygraph_tracer_expected_place(_global_expected_place_)


 def switch_device(device):
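The public eager_guard/in_eager_mode pair is gone; in its place, the private, test-only _test_eager_guard flips the core switch, and the module-level _in_eager_mode() is true only when both the global switch and dygraph mode are on. A sketch of the intended use (assumes this build; dygraph is the default mode in this era of Paddle):

    import paddle.fluid.core as core
    from paddle.fluid.framework import _test_eager_guard, _in_eager_mode

    assert not core._in_eager_mode()
    with _test_eager_guard():
        assert core._in_eager_mode()
        # _in_eager_mode() additionally requires an active dygraph tracer
    assert not core._in_eager_mode()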
python/paddle/fluid/tests/unittests/test_egr_code_generate_api.py

@@ -16,13 +16,13 @@ import paddle.fluid.core as core
 import paddle.fluid.eager.eager_tensor_patch_methods as eager_tensor_patch_methods
 import paddle
 import numpy as np
-from paddle.fluid import eager_guard
+from paddle.fluid.framework import _test_eager_guard
 import unittest


 class EagerOpAPIGenerateTestCase(unittest.TestCase):
     def test_elementwise_add(self):
-        with eager_guard():
+        with _test_eager_guard():
             paddle.set_device("cpu")
             np_x = np.ones([4, 16, 16, 32]).astype('float32')
             np_y = np.ones([4, 16, 16, 32]).astype('float32')

@@ -35,7 +35,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase):
         self.assertTrue(np.array_equal(out_arr, out_arr_expected))

     def test_sum(self):
-        with eager_guard():
+        with _test_eager_guard():
             x_data = np.array([[0.2, 0.3, 0.5, 0.9],
                                [0.1, 0.2, 0.6, 0.7]]).astype('float32')
             x = paddle.to_tensor(x_data, 'float32')

@@ -45,7 +45,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase):
         self.assertTrue(np.array_equal(out_arr, out_arr_expected))

     def test_mm(self):
-        with eager_guard():
+        with _test_eager_guard():
             np_input = np.random.random([16, 32]).astype('float32')
             np_mat2 = np.random.random([32, 32]).astype('float32')
             input = paddle.to_tensor(np_input)

@@ -56,7 +56,7 @@ class EagerOpAPIGenerateTestCase(unittest.TestCase):
         self.assertTrue(np.allclose(out_arr, out_arr_expected))

     def test_sigmoid(self):
-        with eager_guard():
+        with _test_eager_guard():
             np_x = np.array([-0.4, -0.2, 0.1, 0.3]).astype('float32')
             x = paddle.to_tensor(np_x)
             out = paddle.nn.functional.sigmoid(x)
python/paddle/fluid/tests/unittests/test_egr_python_api.py

@@ -16,13 +16,14 @@ import paddle.fluid.core as core
 import paddle.fluid.eager.eager_tensor_patch_methods as eager_tensor_patch_methods
 import paddle
 import numpy as np
-from paddle.fluid import eager_guard
+from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.data_feeder import convert_dtype
 import unittest


 class EagerScaleTestCase(unittest.TestCase):
     def test_scale_base(self):
-        with eager_guard():
+        with _test_eager_guard():
             paddle.set_device("cpu")
             arr = np.ones([4, 16, 16, 32]).astype('float32')
             tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())

@@ -35,7 +36,7 @@ class EagerScaleTestCase(unittest.TestCase):
         self.assertEqual(tensor.stop_gradient, True)

     def test_retain_grad_and_run_backward(self):
-        with eager_guard():
+        with _test_eager_guard():
             paddle.set_device("cpu")
             input_data = np.ones([4, 16, 16, 32]).astype('float32')

@@ -55,33 +56,38 @@ class EagerScaleTestCase(unittest.TestCase):
 class EagerDtypeTestCase(unittest.TestCase):
-    def check_to_tesnsor_and_numpy(self, dtype):
-        with eager_guard():
+    def check_to_tesnsor_and_numpy(self, dtype, proto_dtype):
+        with _test_eager_guard():
             arr = np.random.random([4, 16, 16, 32]).astype(dtype)
             tensor = paddle.to_tensor(arr, dtype)
-            self.assertEqual(tensor.dtype, dtype)
+            self.assertEqual(tensor.dtype, proto_dtype)
             self.assertTrue(np.array_equal(arr, tensor.numpy()))

     def test_dtype_base(self):
-        self.check_to_tesnsor_and_numpy('bool')
-        self.check_to_tesnsor_and_numpy('int8')
-        self.check_to_tesnsor_and_numpy('uint8')
-        self.check_to_tesnsor_and_numpy('int16')
-        self.check_to_tesnsor_and_numpy('int32')
-        self.check_to_tesnsor_and_numpy('int64')
-        self.check_to_tesnsor_and_numpy('float16')
-        self.check_to_tesnsor_and_numpy('float32')
-        self.check_to_tesnsor_and_numpy('float64')
-        self.check_to_tesnsor_and_numpy('complex64')
-        self.check_to_tesnsor_and_numpy('complex128')
+        print("Test_dtype")
+        self.check_to_tesnsor_and_numpy('bool', core.VarDesc.VarType.BOOL)
+        self.check_to_tesnsor_and_numpy('int8', core.VarDesc.VarType.INT8)
+        self.check_to_tesnsor_and_numpy('uint8', core.VarDesc.VarType.UINT8)
+        self.check_to_tesnsor_and_numpy('int16', core.VarDesc.VarType.INT16)
+        self.check_to_tesnsor_and_numpy('int32', core.VarDesc.VarType.INT32)
+        self.check_to_tesnsor_and_numpy('int64', core.VarDesc.VarType.INT64)
+        self.check_to_tesnsor_and_numpy('float16', core.VarDesc.VarType.FP16)
+        self.check_to_tesnsor_and_numpy('float32', core.VarDesc.VarType.FP32)
+        self.check_to_tesnsor_and_numpy('float64', core.VarDesc.VarType.FP64)
+        self.check_to_tesnsor_and_numpy('complex64',
+                                        core.VarDesc.VarType.COMPLEX64)
+        self.check_to_tesnsor_and_numpy('complex128',
+                                        core.VarDesc.VarType.COMPLEX128)


 class EagerTensorPropertiesTestCase(unittest.TestCase):
     def test_properties(self):
-        with eager_guard():
+        print("Test_properties")
+        with _test_eager_guard():
             paddle.set_device("cpu")
             arr = np.ones([4, 16, 16, 32]).astype('float32')
-            tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())
+            tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
+                                      core.CPUPlace())
             self.assertEqual(tensor.shape, [4, 16, 16, 32])
             tensor.name = 'tensor_name_test'
             self.assertEqual(tensor.name, 'tensor_name_test')

@@ -98,6 +104,25 @@ class EagerTensorPropertiesTestCase(unittest.TestCase):
             tensor.stop_gradient = True
             self.assertEqual(tensor.stop_gradient, True)

+    def test_global_properties(self):
+        print("Test_global_properties")
+        self.assertFalse(core._in_eager_mode())
+        with _test_eager_guard():
+            self.assertTrue(core._in_eager_mode())
+        self.assertFalse(core._in_eager_mode())
+
+    def test_place_guard(self):
+        core._enable_eager_mode()
+        if core.is_compiled_with_cuda():
+            paddle.set_device("gpu:0")
+            with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
+                self.assertTrue(core.eager._get_expected_place().is_cpu_place())
+        else:
+            paddle.set_device("cpu")
+            with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
+                self.assertTrue(core.eager._get_expected_place().is_cpu_place())
+        core._disable_eager_mode()
+

 if __name__ == "__main__":
     unittest.main()
python/paddle/tensor/creation.py

@@ -31,7 +31,7 @@ from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varb
 from ..fluid.layers import linspace  # noqa: F401
 import paddle
 from paddle import _C_ops
-from ..fluid.framework import in_eager_mode
+from ..fluid.framework import _in_eager_mode

 __all__ = []

@@ -116,7 +116,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
         ) != _current_expected_place()._get_device_id():
             place = _current_expected_place()

-    if in_eager_mode():
+    if _in_eager_mode():
         if dtype is None:
             dtype = paddle.get_default_dtype()
         return core.eager.to_tensor(data,
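With the renamed helper, paddle.to_tensor dispatches to the eager path only when the private switch is on, falling back to the existing VarBase path otherwise. A sketch (mirrors the tests in this commit):

    import numpy as np
    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        # Routes through core.eager.to_tensor; dtype defaults to
        # paddle.get_default_dtype() when not given.
        t = paddle.to_tensor(np.ones([2, 2]).astype('float32'))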