s920243400 / PaddleDetection
Forked from PaddlePaddle / PaddleDetection (in sync with the fork source)
Commit f53e1d5c
Authored Feb 20, 2019 by minqiyang

implement ClearBlock

Parent: 52e5ee60
Showing 7 changed files with 152 additions and 116 deletions (+152 / -116):
paddle/fluid/framework/block_desc.cc                               +14    -0
paddle/fluid/framework/block_desc.h                                 +2    -0
paddle/fluid/imperative/layer.h                                     +5    -5
paddle/fluid/imperative/tracer.cc                                  +24    -2
paddle/fluid/pybind/protobuf.cc                                     +3    -0
python/paddle/fluid/framework.py                                   +11    -4
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py   +93  -105
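In rough outline, the workflow this commit enables is the following per-batch loop (condensed from the test file below; MNIST, SGDOptimizer, to_variable and train_reader come from that file, while prepare_batch is a hypothetical stand-in for its inline numpy reshaping). After backward and the optimizer step, the global block's recorded ops and non-persistable variables can be dropped so the imperative trace does not grow across batches:

    with fluid.imperative.guard():
        mnist = MNIST()
        sgd = SGDOptimizer(learning_rate=1e-3)
        for batch_id, data in enumerate(train_reader()):
            img, label = prepare_batch(data)  # hypothetical helper
            cost = mnist(img)
            avg_loss = fluid.layers.mean(
                fluid.layers.cross_entropy(cost, label))
            avg_loss._backward()
            sgd.minimize(avg_loss)
            mnist.clear_gradients()
            # New in this commit: free this batch's ops and temporaries,
            # keeping only persistable vars (parameters).
            fluid.default_main_program().global_block()._clear_block()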
paddle/fluid/framework/block_desc.cc
@@ -163,6 +163,20 @@ std::vector<OpDesc *> BlockDesc::AllOps() const {
   return res;
 }
 
+void BlockDesc::ClearBlock() {
+  // clear all ops
+  ops_.clear();
+
+  // clear all vars which are not persistable
+  for (auto it = vars_.begin(); it != vars_.end();) {
+    if (it->second->Persistable()) {
+      ++it;
+    } else {
+      vars_.erase(it++);
+    }
+  }
+}
+
 void BlockDesc::Flush() {
   for (auto &op_desc : ops_) {
     op_desc->Flush();
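To make the keep/drop rule concrete, here is a minimal Python model of what ClearBlock retains (illustrative names only; in Paddle, "persistable" marks parameters and other long-lived variables, while intermediate activations are not persistable):

    block_vars = {
        "fc_0.w_0": True,   # parameter, persistable: kept
        "fc_0.b_0": True,   # parameter, persistable: kept
        "tmp_3": False,     # intermediate result: dropped
    }
    ops = ["mul", "elementwise_add"]

    del ops[:]  # all ops are cleared unconditionally
    for name in [n for n, persistable in block_vars.items() if not persistable]:
        del block_vars[name]

    assert sorted(block_vars) == ["fc_0.b_0", "fc_0.w_0"]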
paddle/fluid/framework/block_desc.h
@@ -97,6 +97,8 @@ class BlockDesc {
   std::vector<OpDesc *> AllOps() const;
 
+  void ClearBlock();
+
   size_t OpSize() const { return ops_.size(); }
 
   OpDesc *Op(int idx) const { return ops_.at(idx).get(); }
paddle/fluid/imperative/layer.h
@@ -103,7 +103,9 @@ class OpBase;
  */
 class VarBase {
  public:
-  VarBase(std::string name)
-      : VarBase(new framework::Variable(), new VarBase(name + "XGRAD", true),
-                name) {}
+  explicit VarBase(std::string name)
+      : VarBase(new framework::Variable(),
+                new VarBase(name + "XGRAD", true), name) {}
 
   // Owns `var` and `grad`
   VarBase(framework::Variable* var, VarBase* grad, std::string name)
@@ -113,7 +115,7 @@ class VarBase {
         stop_gradient_(false),
         pre_op_(nullptr),
         pre_op_out_idx_(-1),
-        name_(name) {
-    LOG(ERROR) << "create " << name;
-  }
+        name_(name) {}
 
   explicit VarBase(std::string name, bool stop_gradient)
       : var_desc_(nullptr),
@@ -122,11 +124,9 @@ class VarBase {
         stop_gradient_(stop_gradient),
         pre_op_(nullptr),
         pre_op_out_idx_(-1),
-        name_(name) {
-    LOG(ERROR) << "create " << name;
-  }
+        name_(name) {}
 
   virtual ~VarBase() {
     LOG(ERROR) << "delete " << name_;
     if (var_) {
       delete var_;
     }
paddle/fluid/imperative/tracer.cc
@@ -66,16 +66,38 @@ platform::Place GetExpectedPlace(platform::Place place, VarBasePtrMap inputs) {
   return result;
 }
 
+// framework::BlockDesc* InferShapeAndVarType(OpBase* op, const VarBasePtrMap&
+// inputs, const VarBasePtrMap& outputs) {
+//   std::unique_ptr<BlockDesc> block(new BlockDesc());
+
+//   // construct op desc
+//   op->op_desc_ = block.AppendOp();
+
+//   // construct op inputs and outputs
+//   // for
+//   //
+//   for (auto it = )
+//   op->op_desc_->SetInput()
+
+//   op->op_desc_->InferShape(*block);
+//   op->op_desc_->InferVarType(block.get());
+
+//   return block.release();
+// }
+
 void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
                    const VarBasePtrMap& outputs, framework::BlockDesc* block,
                    const platform::Place expected_place,
                    const bool stop_gradient) {
   std::map<std::string, VarBase*> vars;
 
+  // framework::BlockDesc* block = InferShapeAndVarType(op, inputs, outputs);
+
   framework::OpDesc* op_desc = op->op_desc_;
   VLOG(3) << "tracer tracing " << op_desc->Type();
   op_desc->InferShape(*block);
   op_desc->InferVarType(block);
 
   std::unique_ptr<framework::OperatorBase> op_base =
       framework::OpRegistry::CreateOp(*op_desc);
@@ -92,7 +114,7 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
       invars.emplace_back(inp->var_);
       vars[inp->var_desc_->Name()] = inp;
-      if (inp->PreOp()) {
+      if (inp->PreOp() && !inp->IsStopGradient()) {
         op->pre_ops_[it.first].push_back(inp->PreOp());
         op->pre_ops_out_idx_[it.first].push_back(inp->PreOpOutIdx());
       } else {
@@ -202,7 +224,7 @@ std::vector<VarBase*> Tracer::PyTrace(OpBase* op,
   op->input_vars_[PyLayer::kFwdInp] = inputs;
   op->output_vars_[PyLayer::kFwdOut] = PyLayer::Apply(op->forward_id_, inputs);
   for (VarBase* inp : inputs) {
-    if (inp->PreOp()) {
+    if (inp->PreOp() && !inp->IsStopGradient()) {
       op->pre_ops_[PyLayer::kFwdInp].push_back(inp->PreOp());
       op->pre_ops_out_idx_[PyLayer::kFwdInp].push_back(inp->PreOpOutIdx());
     } else {
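The behavioral change in both Trace and PyTrace is the added IsStopGradient() test: an input contributes a backward edge only when it has a producer op and gradients are not stopped on it, which is what lets label._stop_gradient = True in the test prune the label out of the backward graph. A small Python model of the rule (names are illustrative, not the Paddle API):

    class Var(object):
        def __init__(self, pre_op=None, stop_gradient=False):
            self.pre_op = pre_op                # op that produced this var, if any
            self.stop_gradient = stop_gradient

    def backward_pre_ops(inputs):
        # Mirrors: if (inp->PreOp() && !inp->IsStopGradient()) { ... }
        return [v.pre_op for v in inputs
                if v.pre_op is not None and not v.stop_gradient]

    producer = "fc_op"
    feat = Var(pre_op=producer)                        # gradient flows through
    label = Var(pre_op=producer, stop_gradient=True)   # pruned from backward
    assert backward_pre_ops([feat, label]) == [producer]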
paddle/fluid/pybind/protobuf.cc
@@ -189,6 +189,9 @@ void BindBlockDesc(pybind11::module *m) {
             return self.HasVar(name);
           },
           pybind11::return_value_policy::reference)
+      .def("_clear_block",
+           [](pd::BlockDesc &self) { return self.ClearBlock(); },
+           pybind11::return_value_policy::reference)
       .def("_rename_var",
            [](pd::BlockDesc &self, const pybind11::bytes &byte_name,
               const pybind11::bytes &byte_name_new) {
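Once this binding is registered, the C++ method is reachable from Python through a block's desc; a minimal sketch, assuming a Program built the usual fluid way:

    import paddle.fluid as fluid

    prog = fluid.Program()
    block_desc = prog.global_block().desc  # the pd::BlockDesc bound above
    block_desc._clear_block()              # invokes BlockDesc::ClearBlock()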
python/paddle/fluid/framework.py
@@ -1188,6 +1188,15 @@ class Block(object):
         else:
             raise ValueError("Var {0} is not found recursively".format(name))
 
+    def _clear_block(self):
+        self.desc._clear_block()
+
+        for name, var in self.vars.items():
+            if not var.persistable:
+                del self.vars[name]
+
+        self.ops.clear()
+
     def all_parameters(self):
         return list(self.iter_parameters())
@@ -1273,7 +1282,6 @@ class Block(object):
         return var
 
     def _remove_var(self, name):
+        if not _in_imperative_mode():
             self._sync_with_cpp()
         self.desc._remove_var(cpt.to_bytes(name))
         del self.vars[name]
@@ -1358,7 +1366,6 @@ class Block(object):
         Returns:
             None
         """
+        if not _in_imperative_mode():
             self._sync_with_cpp()
         self.desc._remove_op(index, index + 1)
         del self.ops[index]
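One caveat worth flagging (an observation about this hunk, not something the commit addresses): _clear_block deletes entries from self.vars while iterating self.vars.items(), which works on Python 2, where items() returns a list, but raises RuntimeError on Python 3, where it is a live view; conversely, list.clear() exists only on Python 3. A sketch of a version safe on both:

    def _clear_block(self):
        self.desc._clear_block()

        # Snapshot the items first: mutating a dict while iterating its live
        # view raises "dictionary changed size during iteration" on Python 3.
        for name, var in list(self.vars.items()):
            if not var.persistable:
                del self.vars[name]

        del self.ops[:]  # portable equivalent of self.ops.clear()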
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
@@ -101,7 +101,8 @@ class MNIST(fluid.imperative.Layer):
 class TestImperativeMnist(unittest.TestCase):
     def test_mnist_float32(self):
         seed = 90
-        batch_num = 100000
+        epoch_num = 1
+        batch_num = 200
 
         with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
@@ -109,125 +110,112 @@ class TestImperativeMnist(unittest.TestCase):
             mnist = MNIST()
             sgd = SGDOptimizer(learning_rate=1e-3)
             train_reader = paddle.batch(
-                paddle.dataset.mnist.train(), batch_size=128)
+                paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
 
             dy_param_init_value = {}
+            for epoch in range(epoch_num):
+                print("epoch", epoch)
                 for batch_id, data in enumerate(train_reader()):
-                    if batch_id >= batch_num:
-                        break
+                    # if batch_id >= batch_num:
+                    #     break
 
                     dy_x_data = np.array(
                         [x[0].reshape(1, 28, 28)
                          for x in data]).astype('float32')
                     y_data = np.array(
                         [x[1] for x in data]).astype('int64').reshape(128, 1)
 
                     img = to_variable(dy_x_data)
                     label = to_variable(y_data)
                     label._stop_gradient = True
 
+                    print("forward start")
                     cost = mnist(img)
                     loss = fluid.layers.cross_entropy(cost, label)
                     avg_loss = fluid.layers.mean(loss)
+                    # dy_out = avg_loss._numpy()
+                    print("forward end")
 
+                    # if batch_id == 0:
+                    #     for param in fluid.default_main_program().global_block(
+                    #     ).all_parameters():
+                    #         dy_param_init_value[param.name] = param._numpy()
+                    dy_out = avg_loss._numpy()
+
+                    if epoch == 0 and batch_id == 0:
+                        for param in fluid.default_main_program().global_block(
+                        ).all_parameters():
+                            dy_param_init_value[param.name] = param._numpy()
 
                     avg_loss._backward()
                     sgd.minimize(avg_loss)
                     mnist.clear_gradients()
+                    print("backward end")
+
+                    fluid.default_main_program().global_block()._clear_block()
 
                     dy_param_value = {}
                     for param in fluid.default_main_program().global_block(
                     ).all_parameters():
                         dy_param_value[param.name] = param._numpy()
+                    print("sgd end")
+
+                    import gc
+                    for name, var in fluid.default_main_program().global_block(
+                    ).vars.items():
+                        if not var.persistable:
+                            fluid.default_main_program().global_block(
+                            )._remove_var(name)
+                            # var._ivar._clear_values()
+                    for op in fluid.default_main_program().global_block().ops:
+                        fluid.default_main_program().global_block()._remove_op(
+                            op.idx)
+
+                    assert len(gc.get_referrers(avg_loss)) == 1
+                    print("clear end")
+                    print("ivar ref ",
+                          gc.get_referrers(
+                              gc.get_referrers(avg_loss._ivar)[0])[0]
+                          .__class__.__name__)
+                    print("ivar ref ",
+                          gc.get_referrers(
+                              gc.get_referrers(avg_loss._ivar)[1])[0]
+                          .__class__.__name__)
+
+            # dy_param_value = {}
+            # for param in fluid.default_main_program().global_block(
+            # ).all_parameters():
+            #     dy_param_value[param.name] = param._numpy()
 
         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
 
             exe = fluid.Executor(fluid.CPUPlace(
             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
 
             mnist = MNIST()
             sgd = SGDOptimizer(learning_rate=1e-3)
             train_reader = paddle.batch(
-                paddle.dataset.mnist.train(), batch_size=128)
+                paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
 
             img = fluid.layers.data(
                 name='pixel', shape=[1, 28, 28], dtype='float32')
             label = fluid.layers.data(name='label', shape=[1], dtype='int64')
             cost = mnist(img)
             loss = fluid.layers.cross_entropy(cost, label)
             avg_loss = fluid.layers.mean(loss)
             sgd.minimize(avg_loss)
 
             # initialize params and fetch them
             static_param_init_value = {}
             static_param_name_list = []
             for param in fluid.default_startup_program().global_block(
             ).all_parameters():
                 static_param_name_list.append(param.name)
 
             out = exe.run(fluid.default_startup_program(),
                           fetch_list=static_param_name_list)
 
             for i in range(len(static_param_name_list)):
                 static_param_init_value[static_param_name_list[i]] = out[i]
 
+            for epoch in range(epoch_num):
                 for batch_id, data in enumerate(train_reader()):
+                    # if batch_id >= batch_num:
+                    #     break
 
                     static_x_data = np.array(
                         [x[0].reshape(1, 28, 28)
                          for x in data]).astype('float32')
                     y_data = np.array(
                         [x[1] for x in data]).astype('int64').reshape([128, 1])
 
                     fetch_list = [avg_loss.name]
                     fetch_list.extend(static_param_name_list)
                     out = exe.run(
                         fluid.default_main_program(),
                         feed={"pixel": static_x_data,
                               "label": y_data},
                         fetch_list=fetch_list)
 
                     static_param_value = {}
                     static_out = out[0]
                     for i in range(1, len(out)):
                         static_param_value[static_param_name_list[i - 1]] = out[i]
 
         for key, value in six.iteritems(static_param_init_value):
             self.assertTrue(np.allclose(value, dy_param_init_value[key]))
 
         self.assertTrue(np.allclose(static_out, dy_out))
 
         for key, value in six.iteritems(static_param_value):
             self.assertTrue(np.allclose(value, dy_param_value[key]))
 
 
 if __name__ == '__main__':
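The gc checks at the end of the imperative section rely on gc.get_referrers to confirm that, once the block is cleared, almost nothing still holds avg_loss or its _ivar. A standalone sketch of that technique (the class is a stand-in, not a Paddle type):

    import gc

    class Tensor(object):
        """Stand-in for a VarBase such as avg_loss."""

    def who_holds(obj):
        # Every returned object still references obj; the enclosing frame's
        # locals (or the module globals) count as one referrer themselves.
        return [type(r).__name__ for r in gc.get_referrers(obj)]

    t = Tensor()
    bucket = [t]
    print(who_holds(t))  # e.g. ['list', 'dict', 'frame']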