PaddlePaddle / PaddleDetection

Commit c8965dc1
Authored Jan 23, 2019 by minqiyang

Polish code

test=develop

Parent: 289aba75
Showing 11 changed files with 57 additions and 208 deletions (+57, -208)
paddle/fluid/imperative/layer.cc  +5 -6
paddle/fluid/imperative/layer.h  +3 -2
paddle/fluid/pybind/pybind.cc  +12 -4
python/paddle/fluid/framework.py  +9 -8
python/paddle/fluid/imperative/nn.py  +3 -5
python/paddle/fluid/layer_helper.py  +2 -2
python/paddle/fluid/optimizer.py  +0 -1
python/paddle/fluid/tests/unittests/test_imperative.py  +11 -8
python/paddle/fluid/tests/unittests/test_imperative_gan.py  +5 -3
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py  +3 -2
python/paddle/fluid/tests/unittests/test_imperative_resnet.py  +4 -167
paddle/fluid/imperative/layer.cc

@@ -168,12 +168,12 @@ class Autograd {
   }
 };
 
-VarBase* VarBase::NewVarBase(const platform::Place& dst_place,
-                             const bool blocking) const {
+std::unique_ptr<VarBase> VarBase::NewVarBase(const platform::Place& dst_place,
+                                             const bool blocking) const {
   PADDLE_ENFORCE(var_->IsInitialized(),
                  "Variable must be initialized when getting numpy tensor");
 
-  VarBase* new_var = new VarBase();
+  std::unique_ptr<VarBase> new_var(new VarBase());
   framework::LoDTensor* tensor =
       new_var->var_->GetMutable<framework::LoDTensor>();
   tensor->Resize(var_->Get<framework::LoDTensor>().dims());
@@ -240,9 +240,8 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
       PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");
 
       framework::Scope scope;
-      platform::Place place = place_;
-      PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place);
-      p.op.RuntimeInferShape(scope, place, ctx);
+      PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place_);
+      p.op.RuntimeInferShape(scope, place_, ctx);
       p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx));
     }
   }
paddle/fluid/imperative/layer.h

@@ -21,6 +21,7 @@
 #include <map>     // NOLINT
 #include <string>  // NOLINT
 #include <vector>  // NOLINT
+#include <memory>  // NOLINT
 
 #include "paddle/fluid/framework/op_desc.h"
 #include "paddle/fluid/framework/operator.h"
@@ -153,7 +154,7 @@ class VarBase {
 
   framework::LoDTensor& GradValue();
 
-  VarBase* NewVarBase(const platform::Place& dst_place,
-                      const bool blocking) const;
+  std::unique_ptr<VarBase> NewVarBase(const platform::Place& dst_place,
+                                      const bool blocking) const;
 
   inline std::string GradName() const {
paddle/fluid/pybind/pybind.cc

@@ -137,13 +137,21 @@ PYBIND11_MODULE(core, m) {
       .def("_grad_ivar",
           [](const imperative::VarBase &self) { return self.grads_; },
           py::return_value_policy::reference)
-      .def("_to",
+      .def("_copy_to",
           [](const imperative::VarBase &self, const platform::CPUPlace &place,
-              bool blocking) { return self.NewVarBase(place, blocking); },
+              bool blocking) {
+             std::unique_ptr<imperative::VarBase> new_var =
+                 self.NewVarBase(place, blocking);
+             return new_var.release();
+           },
           py::return_value_policy::take_ownership)
-      .def("_to",
+      .def("_copy_to",
           [](const imperative::VarBase &self, const platform::CUDAPlace &place,
-              bool blocking) { return self.NewVarBase(place, blocking); },
+              bool blocking) {
+             std::unique_ptr<imperative::VarBase> new_var =
+                 self.NewVarBase(place, blocking);
+             return new_var.release();
+           },
           py::return_value_policy::take_ownership)
       .def("value",
           [](const imperative::VarBase &self) { return self.var_; },
           py::return_value_policy::reference)
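Note: the _to to _copy_to rename tracks the NewVarBase signature change in layer.cc above. The binding now receives a std::unique_ptr, releases it, and hands the raw pointer to Python under py::return_value_policy::take_ownership, so exactly one owner exists on each side of the boundary at all times. A minimal sketch of the Python-side call, pieced together from the _numpy implementation shown in framework.py below (assumes a build of this branch is installed):

import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core

with fluid.imperative.guard():
    x = fluid.imperative.base.to_variable(np.ones([2, 2], np.float32))
    # _copy_to builds a brand-new VarBase whose tensor lives on the target
    # place; blocking=True makes the copy synchronous. Python takes
    # ownership of the returned ivar, so it is independent of x.
    new_ivar = x._ivar._copy_to(core.CPUPlace(), True)
    print(np.array(new_ivar.value().get_tensor()))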
python/paddle/fluid/framework.py

@@ -67,7 +67,7 @@ ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
 CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
 
 _imperative_tracer_ = None
-_current_expected_place_ = None
+_imperative_current_expected_place_ = None
 
 
 def _in_imperative_mode():
@@ -79,7 +79,7 @@ def _imperative_tracer():
 
 
 def _current_expected_place():
-    return _current_expected_place_
+    return _imperative_current_expected_place_
 
 
 class NameScope(object):
@@ -385,7 +385,7 @@ class Variable(object):
         self._ivar.stop_gradient = stop_gradient
 
     def _numpy(self):
-        new_ivar = self._ivar._to(core.CPUPlace(), True)
+        new_ivar = self._ivar._copy_to(core.CPUPlace(), True)
         return np.array(new_ivar.value().get_tensor())
 
     def _backward(self):
@@ -1313,7 +1313,8 @@ class Block(object):
     def _trace_op(self, op, stop_gradient=False):
         if _in_imperative_mode():
             _imperative_tracer().trace(op.iop, op.inputs, op.outputs, self.desc,
-                                       _current_expected_place_, stop_gradient)
+                                       _imperative_current_expected_place_,
+                                       stop_gradient)
 
     def _insert_op(self, index, *args, **kwargs):
         """
@@ -2338,10 +2339,10 @@ def _imperative_guard(tracer):
 
 
 @contextlib.contextmanager
 def _imperative_place_guard(place):
-    global _current_expected_place_
-    tmp_place = _current_expected_place_
-    _current_expected_place_ = place
+    global _imperative_current_expected_place_
+    tmp_place = _imperative_current_expected_place_
+    _imperative_current_expected_place_ = place
     yield
-    _current_expected_place_ = tmp_place
+    _imperative_current_expected_place_ = tmp_place
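Note: apart from switching _numpy over to _copy_to, this file only renames the module global so it is namespaced like _imperative_tracer_. The guard itself is the standard save/override/restore pattern for a module-level global; a reduced, standalone sketch (hypothetical names, with a try/finally added for exception safety that the committed version does without):

import contextlib

_expected_place = None  # stand-in for _imperative_current_expected_place_


@contextlib.contextmanager
def place_guard(place):
    # Save the current global, override it for the duration of the block,
    # and restore it on the way out, even if the body raises.
    global _expected_place
    saved = _expected_place
    _expected_place = place
    try:
        yield
    finally:
        _expected_place = saved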
python/paddle/fluid/imperative/nn.py

@@ -144,7 +144,7 @@ class Conv2D(layers.Layer):
                 attrs={'axis': 1})
 
         # Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(pre_act, force_no_inplace=True)
+        return self._helper.append_activation(pre_act)
 
 
 class Pool2D(layers.Layer):
@@ -286,8 +286,7 @@ class FC(layers.Layer):
         else:
             pre_activation = pre_bias
         # Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(pre_activation,
-                                              force_no_inplace=True)
+        return self._helper.append_activation(pre_activation)
 
 
 class BatchNorm(layers.Layer):
@@ -419,5 +418,4 @@ class BatchNorm(layers.Layer):
             })
 
         # Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(batch_norm_out,
-                                              force_no_inplace=True)
+        return self._helper.append_activation(batch_norm_out)
python/paddle/fluid/layer_helper.py

@@ -419,7 +419,7 @@ class LayerHelper(object):
             attrs={'axis': dim_start})
         return tmp
 
-    def append_activation(self, input_var, force_no_inplace=False):
+    def append_activation(self, input_var):
         act = self.kwargs.get('act', None)
         if act is None:
             return input_var
@@ -436,7 +436,7 @@ class LayerHelper(object):
 
         tmp = input_var
         # NOTE(dzhwinter): some activation support inplace compution.
         # NOTE(minqiyang): currently, we don't support inplace in imperative mode
-        if not force_no_inplace and core.IsInplace(act_type):
+        if not imperative_base.enabled() and core.IsInplace(act_type):
             tmp = input_var
         else:
             tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
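Note: with the force_no_inplace parameter gone, callers no longer opt out of inplace one call site at a time; append_activation consults the global imperative switch itself. A runnable model of the new gate, where imperative_enabled and is_inplace stand in for imperative_base.enabled() and core.IsInplace:

def pick_activation_output(input_var, act_type, imperative_enabled, is_inplace,
                           make_fresh_var):
    # Inplace activation is taken only when the op supports it AND we are
    # not tracing imperatively; imperative mode always gets a fresh output.
    if not imperative_enabled() and is_inplace(act_type):
        return input_var
    return make_fresh_var()


# Imperative mode: a fresh output variable even for an inplace-capable op.
out = pick_activation_output('x', 'relu', lambda: True,
                             lambda t: t == 'relu', lambda: 'fresh_var')
assert out == 'fresh_var'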
python/paddle/fluid/optimizer.py

@@ -388,7 +388,6 @@ class Optimizer(object):
         params_grads = []
         for param in parameters:
             if param.stop_gradient:
-                print("parameter:", param.name, "stop gradient, skip it")
                 continue
 
             # create gradient variable
             grad_var = Variable(
python/paddle/fluid/tests/unittests/test_imperative.py

@@ -68,7 +68,7 @@ class MLP(fluid.imperative.Layer):
 
 class TestImperative(unittest.TestCase):
     def test_layer(self):
-        with fluid.imperative.guard(device=None):
+        with fluid.imperative.guard():
             cl = core.Layer()
             cl.forward([])
             l = fluid.imperative.Layer()
@@ -76,7 +76,7 @@ class TestImperative(unittest.TestCase):
 
     def test_pylayer_func_id(self):
-        with fluid.imperative.guard(device=None):
+        with fluid.imperative.guard():
 
             class PyLayer1(fluid.imperative.PyLayer):
                 def __init__(self):
@@ -116,7 +116,7 @@ class TestImperative(unittest.TestCase):
     def test_pylayer(self):
         np_inp = np.ones([2, 2], np.float32)
-        with fluid.imperative.guard(device=None):
+        with fluid.imperative.guard():
             my_py_layer = MyPyLayer()
             var_inp = fluid.imperative.base.to_variable(np_inp)
             outs = my_py_layer(var_inp)
@@ -133,7 +133,8 @@ class TestImperative(unittest.TestCase):
             x = fluid.layers.reduce_sum(fluid.layers.tanh(x1))
             param_grads = fluid.backward.append_backward(
                 x, parameter_list=[x1.name])[0]
-            exe = fluid.Executor(fluid.CPUPlace())
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
 
             static_out, static_grad = exe.run(
                 feed={inp.name: np_inp},
@@ -144,7 +145,7 @@ class TestImperative(unittest.TestCase):
 
     def test_layer_in_out(self):
         np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
-        with fluid.imperative.guard(device=None):
+        with fluid.imperative.guard():
             var_inp = fluid.imperative.base.to_variable(np_inp)
             l = MyLayer()
             x = l(var_inp)[0]
@@ -160,7 +161,8 @@ class TestImperative(unittest.TestCase):
             x = l(inp)[0]
             param_grads = fluid.backward.append_backward(
                 x, parameter_list=[l._x_for_debug.name])[0]
-            exe = fluid.Executor(fluid.CPUPlace())
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
 
             static_out, static_grad = exe.run(
                 feed={inp.name: np_inp},
@@ -171,7 +173,7 @@ class TestImperative(unittest.TestCase):
 
     def test_mlp(self):
         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-        with fluid.imperative.guard(device=None):
+        with fluid.imperative.guard():
             var_inp = fluid.imperative.base.to_variable(np_inp)
             mlp = MLP()
             out = mlp(var_inp)
@@ -186,7 +188,8 @@ class TestImperative(unittest.TestCase):
             out = mlp(inp)
             param_grads = fluid.backward.append_backward(
                 out, parameter_list=[mlp._fc1._w.name])[0]
-            exe = fluid.Executor(fluid.CPUPlace())
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
             exe.run(fluid.default_startup_program())
 
             static_out, static_grad = exe.run(
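Note: every static-graph half of these tests now builds its executor the same way, so one test body covers both CPU-only and CUDA wheels instead of pinning CPUPlace. The expression, extracted for reuse (assumes paddle.fluid is installed):

import paddle.fluid as fluid
import paddle.fluid.core as core

# CUDA device 0 when the wheel was compiled with CUDA, CPU otherwise.
place = fluid.CPUPlace() if not core.is_compiled_with_cuda() \
    else fluid.CUDAPlace(0)
exe = fluid.Executor(place)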
python/paddle/fluid/tests/unittests/test_imperative_gan.py

@@ -20,6 +20,7 @@ import sys
 
 import paddle
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
 from test_imperative_base import new_program_scope
@@ -58,7 +59,7 @@ class Generator(fluid.imperative.Layer):
 
 
 class TestImperativeMnist(unittest.TestCase):
-    def test_mnist_cpu_float32(self):
+    def test_gan_float32(self):
         seed = 90
 
         startup = fluid.Program()
@@ -115,7 +116,8 @@ class TestImperativeMnist(unittest.TestCase):
             sgd = SGDOptimizer(learning_rate=1e-3)
             sgd.minimize(g_loss)
 
-            exe = fluid.Executor(fluid.CPUPlace())
+            exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda(
+            ) else fluid.CUDAPlace(0))
             static_params = dict()
             with fluid.scope_guard(scope):
                 img = np.ones([2, 1], np.float32)
@@ -135,7 +137,7 @@ class TestImperativeMnist(unittest.TestCase):
                         scope.find_var(param.name).get_tensor())
 
         dy_params = dict()
-        with fluid.imperative.guard(place=fluid.CPUPlace()):
+        with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py

@@ -101,7 +101,7 @@ class TestImperativeMnist(unittest.TestCase):
     def test_mnist_cpu_float32(self):
         seed = 90
 
-        with fluid.imperative.guard(place=fuild.CPUPlace()):
+        with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
@@ -145,7 +145,8 @@ class TestImperativeMnist(unittest.TestCase):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
 
-            exe = fluid.Executor(fluid.CPUPlace())
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
 
             mnist = MNIST()
             sgd = SGDOptimizer(learning_rate=1e-3)
python/paddle/fluid/tests/unittests/test_imperative_resnet.py

@@ -143,7 +143,7 @@ class BottleneckBlock(fluid.imperative.Layer):
         y = fluid.layers.elementwise_add(x=short, y=conv2)
 
         layer_helper = LayerHelper('elementwise_add_activation', act='relu')
-        return layer_helper.append_activation(y, force_no_inplace=True)
+        return layer_helper.append_activation(y)
 
 
 class ResNet(fluid.imperative.Layer):
@@ -204,12 +204,9 @@ class ResNet(fluid.imperative.Layer):
 
 class TestImperativeResnet(unittest.TestCase):
-    def test_resnet_gpu_float32(self):
+    def test_resnet_float32(self):
         seed = 90
 
-        if not core.is_compiled_with_cuda():
-            return
-
         batch_size = train_parameters["batch_size"]
         batch_num = 1
         with fluid.imperative.guard():
@@ -277,168 +274,8 @@ class TestImperativeResnet(unittest.TestCase):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
 
-            exe = fluid.Executor(fluid.CUDAPlace(0))
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
 
-            resnet = ResNet()
-            optimizer = optimizer_setting(train_parameters)
-
-            np.random.seed(seed)
-            import random
-            random.seed = seed
-            train_reader = paddle.batch(
-                paddle.dataset.flowers.train(use_xmap=False),
-                batch_size=batch_size)
-
-            img = fluid.layers.data(
-                name='pixel', shape=[3, 224, 224], dtype='float32')
-            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-            out = resnet(img)
-            loss = fluid.layers.cross_entropy(input=out, label=label)
-            avg_loss = fluid.layers.mean(x=loss)
-            optimizer.minimize(avg_loss)
-
-            # initialize params and fetch them
-            static_param_init_value = {}
-            static_param_name_list = []
-            static_grad_name_list = []
-            for param in fluid.default_startup_program().global_block(
-            ).all_parameters():
-                static_param_name_list.append(param.name)
-            for param in fluid.default_main_program().global_block(
-            ).all_parameters():
-                if not param.stop_gradient:
-                    static_grad_name_list.append(param.name +
-                                                 core.grad_var_suffix())
-
-            out = exe.run(fluid.default_startup_program(),
-                          fetch_list=static_param_name_list)
-
-            for i in range(len(static_param_name_list)):
-                static_param_init_value[static_param_name_list[i]] = out[i]
-
-            for batch_id, data in enumerate(train_reader()):
-                if batch_id >= batch_num:
-                    break
-
-                static_x_data = np.array(
-                    [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
-                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
-                    [batch_size, 1])
-
-                fetch_list = [avg_loss.name]
-                fetch_list.extend(static_param_name_list)
-                fetch_list.extend(static_grad_name_list)
-                out = exe.run(fluid.default_main_program(),
-                              feed={"pixel": static_x_data,
-                                    "label": y_data},
-                              fetch_list=fetch_list)
-
-                static_param_value = {}
-                static_grad_value = {}
-                static_out = out[0]
-                param_start_pos = 1
-                grad_start_pos = len(static_param_name_list) + param_start_pos
-                for i in range(param_start_pos,
-                               len(static_param_name_list) + param_start_pos):
-                    static_param_value[static_param_name_list[
-                        i - param_start_pos]] = out[i]
-                for i in range(grad_start_pos,
-                               len(static_grad_name_list) + grad_start_pos):
-                    static_grad_value[static_grad_name_list[
-                        i - grad_start_pos]] = out[i]
-
-        self.assertTrue(np.allclose(static_out, dy_out))
-
-        self.assertEqual(len(dy_param_init_value), len(static_param_init_value))
-        for key, value in six.iteritems(static_param_init_value):
-            self.assertTrue(np.allclose(value, dy_param_init_value[key]))
-            self.assertTrue(np.isfinite(value.all()))
-            self.assertFalse(np.isnan(value.any()))
-
-        self.assertEqual(len(dy_grad_value), len(static_grad_value))
-        for key, value in six.iteritems(static_grad_value):
-            # TODO(minqiyang): find a way to align the gradient
-            self.assertTrue(np.allclose(value, dy_grad_value[key]))
-            self.assertTrue(np.isfinite(value.all()))
-            self.assertFalse(np.isnan(value.any()))
-
-        self.assertEqual(len(dy_param_value), len(static_param_value))
-        for key, value in six.iteritems(static_param_value):
-            self.assertTrue(np.allclose(value, dy_param_value[key]))
-            self.assertTrue(np.isfinite(value.all()))
-            self.assertFalse(np.isnan(value.any()))
-
-    def test_resnet_cpu_float32(self):
-        seed = 90
-
-        batch_size = train_parameters["batch_size"]
-        batch_num = 1
-        with fluid.imperative.guard(place=fluid.CPUPlace()):
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
-
-            resnet = ResNet()
-            optimizer = optimizer_setting(train_parameters)
-            np.random.seed(seed)
-            import random
-            random.seed = seed
-            train_reader = paddle.batch(
-                paddle.dataset.flowers.train(use_xmap=False),
-                batch_size=batch_size)
-
-            dy_param_init_value = {}
-            for param in fluid.default_main_program().global_block(
-            ).all_parameters():
-                dy_param_init_value[param.name] = param._numpy()
-
-            for batch_id, data in enumerate(train_reader()):
-                if batch_id >= batch_num:
-                    break
-
-                dy_x_data = np.array(
-                    [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
-                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
-                    batch_size, 1)
-
-                img = to_variable(dy_x_data)
-                label = to_variable(y_data)
-                label._stop_gradient = True
-
-                out = resnet(img)
-                loss = fluid.layers.cross_entropy(input=out, label=label)
-                avg_loss = fluid.layers.mean(x=loss)
-
-                dy_out = avg_loss._numpy()
-
-                if batch_id == 0:
-                    for param in fluid.default_main_program().global_block(
-                    ).all_parameters():
-                        if param.name not in dy_param_init_value:
-                            dy_param_init_value[param.name] = param._numpy()
-
-                avg_loss._backward()
-
-                dy_grad_value = {}
-                for param in fluid.default_main_program().global_block(
-                ).all_parameters():
-                    if not param.stop_gradient:
-                        np_array = np.array(param._ivar._grad_ivar().value()
-                                            .get_tensor())
-                        dy_grad_value[param.name + core.grad_var_suffix(
-                        )] = np_array
-
-                optimizer.minimize(avg_loss)
-
-                dy_param_value = {}
-                for param in fluid.default_main_program().global_block(
-                ).all_parameters():
-                    dy_param_value[param.name] = param._numpy()
-
-        with new_program_scope():
-            fluid.default_startup_program().random_seed = seed
-            fluid.default_main_program().random_seed = seed
-
-            exe = fluid.Executor(fluid.CPUPlace())
-
             resnet = ResNet()
             optimizer = optimizer_setting(train_parameters)
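Note: making the executor place conditional is what allows the whole test_resnet_cpu_float32 duplicate, and the is_compiled_with_cuda early return, to be deleted: the single test_resnet_float32 now runs the imperative and static halves on whichever device the build supports and compares them. A minimal, runnable model of the comparison idiom the surviving test ends with (the finite check is written elementwise here):

import numpy as np

# Toy stand-ins for the dicts the test fills from both execution modes.
static_param_value = {'conv_w': np.ones(3, dtype=np.float32)}
dy_param_value = {'conv_w': np.ones(3, dtype=np.float32)}

# Every static-graph value must match its imperative ("dy") counterpart
# key by key, and must contain no NaNs or infs.
assert len(dy_param_value) == len(static_param_value)
for key, value in static_param_value.items():
    assert np.allclose(value, dy_param_value[key])
    assert np.isfinite(value).all()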