Commit 289aba75

Polish code

test=develop

Authored by Paddle CI on Jan 22, 2019; committed by minqiyang on Jan 22, 2019.
Parent: cf7229d2
Showing 9 changed files with 60 additions and 42 deletions.
paddle/fluid/imperative/layer.cc (+24, -19)
paddle/fluid/imperative/layer.h (+4, -3)
paddle/fluid/imperative/tracer.cc (+3, -4)
paddle/fluid/pybind/pybind.cc (+7, -2)
python/paddle/fluid/framework.py (+10, -4)
python/paddle/fluid/imperative/base.py (+6, -7)
python/paddle/fluid/tests/unittests/test_imperative_gan.py (+1, -1)
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py (+1, -1)
python/paddle/fluid/tests/unittests/test_imperative_resnet.py (+4, -1)
paddle/fluid/imperative/layer.cc
@@ -49,8 +49,7 @@ class TensorAddToFunctor : public boost::static_visitor<> {
   void operator()(const platform::CPUPlace& place) {
     platform::CPUDeviceContext* ctx = dynamic_cast<platform::CPUDeviceContext*>(
         platform::DeviceContextPool::Instance().Get(place));
-    auto blas =
-        operators::math::GetBlas<platform::CPUDeviceContext, float>(*ctx);
+    auto blas = operators::math::GetBlas<platform::CPUDeviceContext, T>(*ctx);
     blas.AXPY(numel_, 1., x_, y_);
   }
@@ -59,8 +58,7 @@ class TensorAddToFunctor : public boost::static_visitor<> {
     platform::CUDADeviceContext* ctx =
         dynamic_cast<platform::CUDADeviceContext*>(
             platform::DeviceContextPool::Instance().Get(place));
-    auto blas =
-        operators::math::GetBlas<platform::CUDADeviceContext, float>(*ctx);
+    auto blas = operators::math::GetBlas<platform::CUDADeviceContext, T>(*ctx);
     blas.AXPY(numel_, 1., x_, y_);
   }
 #else
@@ -82,7 +80,7 @@ class TensorAddToFunctor : public boost::static_visitor<> {
 }  // namespace detail

-void AddGradTo(Variable* src, Variable* dst, platform::Place place) {
+void AddTo(Variable* src, Variable* dst, platform::Place place) {
   framework::Tensor* dst_tensor = dst->GetMutable<framework::LoDTensor>();
   framework::Tensor* src_tensor = src->GetMutable<framework::LoDTensor>();
@@ -170,27 +168,34 @@ class Autograd {
   }
 };

-framework::LoDTensor* VarBase::CopiedTensor() const {
+VarBase* VarBase::NewVarBase(const platform::Place& dst_place,
+                             const bool blocking) const {
   PADDLE_ENFORCE(var_->IsInitialized(),
                  "Variable must be initialized when getting numpy tensor");
-  platform::Place place = var_->Get<framework::LoDTensor>().place();
-  framework::LoDTensor* result = new framework::LoDTensor();
-  result->Resize(var_->Get<framework::LoDTensor>().dims());
-  result->set_lod(var_->Get<framework::LoDTensor>().lod());
-  if (platform::is_gpu_place(place)) {
-    VLOG(3) << "fetch tensor " << var_desc_->Name() << " from gpu";
-    framework::TensorCopy(var_->Get<framework::LoDTensor>(),
-                          platform::CPUPlace(), result);
+
+  VarBase* new_var = new VarBase();
+  framework::LoDTensor* tensor =
+      new_var->var_->GetMutable<framework::LoDTensor>();
+  tensor->Resize(var_->Get<framework::LoDTensor>().dims());
+  tensor->set_lod(var_->Get<framework::LoDTensor>().lod());
+
+  if (blocking) {
     platform::DeviceContext* dev_ctx =
-        platform::DeviceContextPool::Instance().Get(place);
+        platform::DeviceContextPool::Instance().Get(dst_place);
+
+    framework::TensorCopySync(var_->Get<framework::LoDTensor>(), dst_place,
+                              tensor);
+
     dev_ctx->Wait();
   } else {
-    TensorCopy(var_->Get<framework::LoDTensor>(), platform::CPUPlace(), result);
+    framework::TensorCopy(var_->Get<framework::LoDTensor>(), dst_place, tensor);
+  }
+
+  if (platform::is_gpu_place(dst_place)) {
+    VLOG(3) << "copy tensor " << var_desc_->Name() << " from gpu";
   }

-  return result;
+  return new_var;
 }

 framework::LoDTensor& VarBase::GradValue() {
@@ -235,7 +240,7 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
     PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");

     framework::Scope scope;
-    platform::Place place = expected_place_;
+    platform::Place place = place_;
     PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place);
     p.op.RuntimeInferShape(scope, place, ctx);
     p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx));
@@ -249,7 +254,7 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
   for (size_t i = 0; i < outputs.size(); ++i) {
     framework::Variable* grad = outputs[i];
     framework::Variable* orig_grad = origin_outputs[i];
-    AddGradTo(grad, orig_grad, expected_place_);
+    AddTo(grad, orig_grad, place_);
     delete grad;
   }
 }
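For orientation: `AddTo` accumulates a freshly computed gradient into the existing gradient buffer, and `blas.AXPY(numel_, 1., x_, y_)` computes `y += 1 * x` element-wise. A minimal numpy sketch of the same in-place accumulation (illustrative only, not Paddle API):

    import numpy as np

    def add_to(src, dst):
        # numpy analogue of blas.AXPY(numel_, 1., x_, y_): dst += 1 * src, in place
        assert src.shape == dst.shape, "gradient buffers must match"
        dst += src

    step_grad = np.full((2, 3), 0.5, dtype=np.float32)  # one op's gradient
    acc_grad = np.zeros((2, 3), dtype=np.float32)       # accumulation buffer
    add_to(step_grad, acc_grad)
    add_to(step_grad, acc_grad)
    print(acc_grad[0, 0])  # 1.0 after two accumulations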
paddle/fluid/imperative/layer.h
@@ -153,7 +153,8 @@ class VarBase {
   framework::LoDTensor& GradValue();

-  framework::LoDTensor* CopiedTensor() const;
+  VarBase* NewVarBase(const platform::Place& dst_place,
+                      const bool blocking) const;

   inline std::string GradName() const {
     PADDLE_ENFORCE(
@@ -184,7 +185,7 @@ class OpBase {
         forward_id_(-1),
         grad_op_desc_(nullptr),
         backward_id_(-1),
-        expected_place_(platform::CPUPlace()) {}
+        place_(platform::CPUPlace()) {}

   virtual ~OpBase() {
     if (grad_op_desc_) delete grad_op_desc_;
@@ -201,7 +202,7 @@ class OpBase {
   framework::OpDesc* grad_op_desc_;
   int backward_id_;

-  platform::Place expected_place_;
+  platform::Place place_;

   VarBasePtrMap input_vars_;
   VarBasePtrMap output_vars_;
paddle/fluid/imperative/tracer.cc
@@ -131,10 +131,9 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
   PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");

   framework::Scope scope;
-  op->expected_place_ = GetExpectedPlace(expected_place, inputs);
-  PreparedOp prepared_op =
-      PreparedOp::Prepare(ctx, *op_kernel, op->expected_place_);
-  prepared_op.op.RuntimeInferShape(scope, op->expected_place_, ctx);
+  op->place_ = GetExpectedPlace(expected_place, inputs);
+  PreparedOp prepared_op = PreparedOp::Prepare(ctx, *op_kernel, op->place_);
+  prepared_op.op.RuntimeInferShape(scope, op->place_, ctx);
   prepared_op.func(framework::ExecutionContext(
       prepared_op.op, scope, *prepared_op.dev_ctx, prepared_op.ctx));
paddle/fluid/pybind/pybind.cc
@@ -137,8 +137,13 @@ PYBIND11_MODULE(core, m) {
       .def("_grad_ivar",
           [](const imperative::VarBase &self) { return self.grads_; },
           py::return_value_policy::reference)
-      .def("_cpu_tensor",
-          [](const imperative::VarBase &self) { return self.CopiedTensor(); },
+      .def("_to",
+          [](const imperative::VarBase &self, const platform::CPUPlace &place,
+             bool blocking) { return self.NewVarBase(place, blocking); },
+          py::return_value_policy::take_ownership)
+      .def("_to",
+          [](const imperative::VarBase &self, const platform::CUDAPlace &place,
+             bool blocking) { return self.NewVarBase(place, blocking); },
           py::return_value_policy::take_ownership)
       .def("value", [](const imperative::VarBase &self) { return self.var_; },
           py::return_value_policy::reference)
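A sketch of how the overloaded `_to` binding is expected to be driven from Python; it mirrors the `_numpy` change in framework.py below. `var` is assumed to be an imperative `fluid.Variable` that already holds data:

    import numpy as np
    from paddle.fluid import core

    # Blocking copy of the underlying VarBase to CPU, then read it as numpy.
    new_ivar = var._ivar._to(core.CPUPlace(), True)
    arr = np.array(new_ivar.value().get_tensor())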
python/paddle/fluid/framework.py
@@ -385,8 +385,8 @@ class Variable(object):
         self._ivar.stop_gradient = stop_gradient

     def _numpy(self):
-        tensor = self._ivar._cpu_tensor()
-        return np.array(tensor)
+        new_ivar = self._ivar._to(core.CPUPlace(), True)
+        return np.array(new_ivar.value().get_tensor())

     def _backward(self):
         self._ivar._run_backward()
@@ -2326,16 +2326,22 @@ def _get_var(name, program=None):
 @contextlib.contextmanager
-def _imperative_guard(tracer, place):
+def _imperative_guard(tracer):
     global _imperative_tracer_
     tmp_trace = _imperative_tracer_
     _imperative_tracer_ = tracer

-    global _current_expected_place_
-    tmp_place = _current_expected_place_
-    _current_expected_place_ = place
-
     yield

     _imperative_tracer_ = tmp_trace
-    _current_expected_place_ = tmp_place
+
+
+@contextlib.contextmanager
+def _imperative_place_guard(place):
+    global _current_expected_place_
+    tmp_place = _current_expected_place_
+    _current_expected_place_ = place
+
+    yield
+
+    _current_expected_place_ = tmp_place
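The split guards follow the usual save/set/restore pattern for a module-level global. A standalone sketch of that pattern with generic names; unlike the code above, it wraps the `yield` in `try/finally`, so the saved value is restored even if the body raises:

    import contextlib

    _current_place = None  # stand-in for framework._current_expected_place_

    @contextlib.contextmanager
    def place_guard(place):
        global _current_place
        saved = _current_place          # save
        _current_place = place          # set
        try:
            yield
        finally:
            _current_place = saved      # restore, even on exception

    with place_guard("cpu"):
        assert _current_place == "cpu"
    assert _current_place is None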
python/paddle/fluid/imperative/base.py
@@ -25,23 +25,22 @@ def enabled():
 @contextlib.contextmanager
-def guard(device=0):
+def guard(place=None):
     train = framework.Program()
     startup = framework.Program()
     tracer = core.Tracer(train.current_block().desc)

-    if device is None:
-        place = core.CPUPlace()
-    else:
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(device)
-        else:
-            place = core.CPUPlace()
+    if place is None:
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+        else:
+            place = core.CPUPlace()

     with framework.program_guard(train, startup):
         with framework.unique_name.guard():
-            with framework._imperative_guard(tracer, place):
-                yield
+            with framework._imperative_guard(tracer):
+                with framework._imperative_place_guard(place):
+                    yield


 def to_variable(value, block=None):
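With the new signature, callers pass a place object rather than a device index, and `place=None` falls back to `CUDAPlace(0)` when Paddle is built with CUDA, otherwise `CPUPlace()`. A minimal usage sketch (assuming `to_variable` is importable as shown):

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.imperative.base import to_variable

    with fluid.imperative.guard(place=fluid.CPUPlace()):
        x = to_variable(np.ones([2, 2], dtype=np.float32))
        print(x._numpy())  # round-trips through _to(core.CPUPlace(), True)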
python/paddle/fluid/tests/unittests/test_imperative_gan.py
@@ -135,7 +135,7 @@ class TestImperativeMnist(unittest.TestCase):
                 scope.find_var(param.name).get_tensor())

         dy_params = dict()
-        with fluid.imperative.guard(device=None):
+        with fluid.imperative.guard(place=fluid.CPUPlace()):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
@@ -101,7 +101,7 @@ class TestImperativeMnist(unittest.TestCase):
     def test_mnist_cpu_float32(self):
         seed = 90
-        with fluid.imperative.guard(device=None):
+        with fluid.imperative.guard(place=fuild.CPUPlace()):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
python/paddle/fluid/tests/unittests/test_imperative_resnet.py
@@ -207,6 +207,9 @@ class TestImperativeResnet(unittest.TestCase):
     def test_resnet_gpu_float32(self):
         seed = 90

+        if not core.is_compiled_with_cuda():
+            return
+
         batch_size = train_parameters["batch_size"]
         batch_num = 1
         with fluid.imperative.guard():
@@ -370,7 +373,7 @@ class TestImperativeResnet(unittest.TestCase):
         batch_size = train_parameters["batch_size"]
         batch_num = 1
-        with fluid.imperative.guard(device=None):
+        with fluid.imperative.guard(place=fluid.CPUPlace()):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed