BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with the upstream project)
Commit f53e1d5c
Authored Feb 20, 2019 by minqiyang
Parent: 52e5ee60

implement ClearBlock
Showing 7 changed files with 152 additions and 116 deletions (+152, -116)
paddle/fluid/framework/block_desc.cc                                 +14   -0
paddle/fluid/framework/block_desc.h                                  +2    -0
paddle/fluid/imperative/layer.h                                      +5    -5
paddle/fluid/imperative/tracer.cc                                    +24   -2
paddle/fluid/pybind/protobuf.cc                                      +3    -0
python/paddle/fluid/framework.py                                     +11   -4
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py     +93   -105
paddle/fluid/framework/block_desc.cc

@@ -163,6 +163,20 @@ std::vector<OpDesc *> BlockDesc::AllOps() const {
   return res;
 }
 
+void BlockDesc::ClearBlock() {
+  // clear all ops
+  ops_.clear();
+
+  // clear all vars which are not persistable
+  for (auto it = vars_.begin(); it != vars_.end();) {
+    if (it->second->Persistable()) {
+      ++it;
+    } else {
+      vars_.erase(it++);
+    }
+  }
+}
+
 void BlockDesc::Flush() {
   for (auto &op_desc : ops_) {
     op_desc->Flush();
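ClearBlock drops every op and every non-persistable variable from the block in one pass. The variable loop uses the standard erase-while-iterating idiom for std::map: the iterator is post-incremented before erase, so removing the current element never invalidates the iterator still in use. A self-contained sketch of that idiom follows; the map of names to a persistable flag is a stand-in for BlockDesc::vars_, not Paddle's actual type.

#include <iostream>
#include <map>
#include <string>

int main() {
  // Stand-in for BlockDesc::vars_: variable name -> "is persistable" flag.
  std::map<std::string, bool> vars = {{"fc_0.w_0", true},    // parameter, kept
                                      {"fc_0.tmp_0", false},  // intermediate, erased
                                      {"learning_rate", true},
                                      {"tmp_1", false}};

  // Erase non-persistable entries while iterating; erase(it++) removes the
  // current element only after the iterator has already advanced.
  for (auto it = vars.begin(); it != vars.end();) {
    if (it->second) {
      ++it;
    } else {
      vars.erase(it++);
    }
  }

  for (const auto &kv : vars) {
    std::cout << kv.first << "\n";  // prints only the persistable names
  }
  return 0;
}

The loop in ClearBlock has the same shape, with it->second->Persistable() deciding which entries survive.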
paddle/fluid/framework/block_desc.h

@@ -97,6 +97,8 @@ class BlockDesc {
   std::vector<OpDesc *> AllOps() const;
 
+  void ClearBlock();
+
   size_t OpSize() const { return ops_.size(); }
 
   OpDesc *Op(int idx) const { return ops_.at(idx).get(); }
paddle/fluid/imperative/layer.h

@@ -103,7 +103,9 @@ class OpBase;
  */
 class VarBase {
  public:
-  VarBase(std::string name)
+  explicit VarBase(std::string name)
       : VarBase(new framework::Variable(),
                 new VarBase(name + "XGRAD", true), name) {}
 
   // Owns `var` and `grad`
   VarBase(framework::Variable* var, VarBase* grad, std::string name)
@@ -113,7 +115,7 @@ class VarBase {
         stop_gradient_(false),
         pre_op_(nullptr),
         pre_op_out_idx_(-1),
-        name_(name) { LOG(ERROR) << "create " << name; }
+        name_(name) {}
 
   explicit VarBase(std::string name, bool stop_gradient)
       : var_desc_(nullptr),
@@ -122,11 +124,9 @@ class VarBase {
         stop_gradient_(stop_gradient),
         pre_op_(nullptr),
         pre_op_out_idx_(-1),
-        name_(name) { LOG(ERROR) << "create " << name; }
+        name_(name) {}
 
   virtual ~VarBase() {
-    LOG(ERROR) << "delete " << name_;
     if (var_) {
       delete var_;
     }
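Besides removing the LOG(ERROR) debug output from the constructors and the destructor, the change marks the single-argument constructor explicit, so a std::string can no longer be converted to a VarBase implicitly. A minimal sketch of what that prevents, using toy types rather than the real VarBase:

#include <string>
#include <utility>

struct ImplicitVar {
  ImplicitVar(std::string name) : name_(std::move(name)) {}  // implicit conversion allowed
  std::string name_;
};

struct ExplicitVar {
  explicit ExplicitVar(std::string name) : name_(std::move(name)) {}  // conversion must be spelled out
  std::string name_;
};

void TakeImplicit(const ImplicitVar&) {}
void TakeExplicit(const ExplicitVar&) {}

int main() {
  TakeImplicit(std::string("x"));               // compiles: the string silently becomes an ImplicitVar
  // TakeExplicit(std::string("x"));            // would not compile: no implicit conversion
  TakeExplicit(ExplicitVar(std::string("x")));  // OK: construction is explicit
  return 0;
}

With explicit, passing a bare string where a VarBase is expected becomes a compile error instead of a silently constructed temporary.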
paddle/fluid/imperative/tracer.cc

@@ -66,16 +66,38 @@ platform::Place GetExpectedPlace(platform::Place place, VarBasePtrMap inputs) {
   return result;
 }
 
+// framework::BlockDesc* InferShapeAndVarType(OpBase* op, const VarBasePtrMap&
+// inputs, const VarBasePtrMap& outputs) {
+//   std::unique_ptr<BlockDesc> block(new BlockDesc());
+//   // construct op desc
+//   op->op_desc_ = block.AppendOp();
+//   // construct op inputs and outputs
+//   // for
+//   //
+//   for (auto it = )
+//   op->op_desc_->SetInput()
+//   op->op_desc_->InferShape(*block);
+//   op->op_desc_->InferVarType(block.get());
+//   return block.release();
+// }
+
 void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
                    const VarBasePtrMap& outputs, framework::BlockDesc* block,
                    const platform::Place expected_place,
                    const bool stop_gradient) {
   std::map<std::string, VarBase*> vars;
 
+  // framework::BlockDesc* block = InferShapeAndVarType(op, inputs, outputs);
+
   framework::OpDesc* op_desc = op->op_desc_;
   VLOG(3) << "tracer tracing " << op_desc->Type();
   op_desc->InferShape(*block);
   op_desc->InferVarType(block);
+
   std::unique_ptr<framework::OperatorBase> op_base =
       framework::OpRegistry::CreateOp(*op_desc);

@@ -92,7 +114,7 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
     invars.emplace_back(inp->var_);
     vars[inp->var_desc_->Name()] = inp;
-    if (inp->PreOp()) {
+    if (inp->PreOp() && !inp->IsStopGradient()) {
       op->pre_ops_[it.first].push_back(inp->PreOp());
       op->pre_ops_out_idx_[it.first].push_back(inp->PreOpOutIdx());
     } else {

@@ -202,7 +224,7 @@ std::vector<VarBase*> Tracer::PyTrace(OpBase* op,
   op->input_vars_[PyLayer::kFwdInp] = inputs;
   op->output_vars_[PyLayer::kFwdOut] = PyLayer::Apply(op->forward_id_, inputs);
   for (VarBase* inp : inputs) {
-    if (inp->PreOp()) {
+    if (inp->PreOp() && !inp->IsStopGradient()) {
       op->pre_ops_[PyLayer::kFwdInp].push_back(inp->PreOp());
       op->pre_ops_out_idx_[PyLayer::kFwdInp].push_back(inp->PreOpOutIdx());
     } else {
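The changed condition in both Trace and PyTrace means an input contributes a backward edge only when it has a producing op and has not been marked stop-gradient, so gradient bookkeeping is skipped for inputs such as labels. A small illustrative sketch of that pruning rule on a toy graph; the Var and Op structs below are simplified stand-ins, not the imperative framework's real classes:

#include <iostream>
#include <string>
#include <vector>

struct Op;

struct Var {
  std::string name;
  Op* pre_op = nullptr;       // op that produced this var, if any
  bool stop_gradient = false;
};

struct Op {
  std::string type;
  std::vector<Op*> pre_ops;   // ops the backward pass must visit
};

// Mirror of the tracer's rule: link to the producer only if it exists and
// gradients are not stopped at this input.
void LinkBackward(Op* op, const std::vector<Var*>& inputs) {
  for (Var* inp : inputs) {
    if (inp->pre_op != nullptr && !inp->stop_gradient) {
      op->pre_ops.push_back(inp->pre_op);
    }
  }
}

int main() {
  Op conv{"conv2d", {}};
  Var feature{"feature", &conv, false};  // produced by conv, gradients flow
  Var label{"label", nullptr, true};     // data input, stop_gradient set

  Op loss{"cross_entropy", {}};
  LinkBackward(&loss, {&feature, &label});

  std::cout << "backward edges: " << loss.pre_ops.size() << "\n";  // prints 1
  return 0;
}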
paddle/fluid/pybind/protobuf.cc

@@ -189,6 +189,9 @@ void BindBlockDesc(pybind11::module *m) {
              return self.HasVar(name);
            },
            pybind11::return_value_policy::reference)
+      .def("_clear_block",
+           [](pd::BlockDesc &self) { return self.ClearBlock(); },
+           pybind11::return_value_policy::reference)
       .def("_rename_var",
            [](pd::BlockDesc &self, const pybind11::bytes &byte_name,
               const pybind11::bytes &byte_name_new) {
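The new binding follows the usual pybind11 pattern: a lambda that takes the bound C++ object by reference is registered under a Python-visible name with .def. A minimal, self-contained sketch of that pattern with an invented ToyBlock class; the module, class, and method names here are made up for illustration and are not Paddle's:

#include <pybind11/pybind11.h>

#include <map>
#include <string>

namespace py = pybind11;

// Toy stand-in for BlockDesc with a ClearBlock-like method.
class ToyBlock {
 public:
  void Add(const std::string& name) { vars_[name] = true; }
  void Clear() { vars_.clear(); }
  size_t Size() const { return vars_.size(); }

 private:
  std::map<std::string, bool> vars_;
};

PYBIND11_MODULE(toy_block, m) {
  py::class_<ToyBlock>(m, "ToyBlock")
      .def(py::init<>())
      .def("add", [](ToyBlock& self, const std::string& name) { self.Add(name); })
      // Same shape as the `_clear_block` binding above: a lambda forwarding
      // to the C++ method, exposed under a Python-style name.
      .def("_clear", [](ToyBlock& self) { self.Clear(); })
      .def("size", [](ToyBlock& self) { return self.Size(); });
}

From Python the method is then reachable as an ordinary attribute of the bound object, which is how framework.py can call self.desc._clear_block().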
python/paddle/fluid/framework.py

@@ -1188,6 +1188,15 @@ class Block(object):
         else:
             raise ValueError("Var {0} is not found recursively".format(name))
 
+    def _clear_block(self):
+        self.desc._clear_block()
+
+        for name, var in self.vars.items():
+            if not var.persistable:
+                del self.vars[name]
+
+        self.ops.clear()
+
     def all_parameters(self):
         return list(self.iter_parameters())

@@ -1273,8 +1282,7 @@ class Block(object):
         return var
 
     def _remove_var(self, name):
-        if not _in_imperative_mode():
-            self._sync_with_cpp()
+        self._sync_with_cpp()
         self.desc._remove_var(cpt.to_bytes(name))
         del self.vars[name]

@@ -1358,8 +1366,7 @@ class Block(object):
         Returns:
             None
         """
-        if not _in_imperative_mode():
-            self._sync_with_cpp()
+        self._sync_with_cpp()
         self.desc._remove_op(index, index + 1)
         del self.ops[index]
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py

@@ -101,7 +101,8 @@ class MNIST(fluid.imperative.Layer):
 class TestImperativeMnist(unittest.TestCase):
     def test_mnist_float32(self):
         seed = 90
-        batch_num = 100000
+        epoch_num = 1
+        batch_num = 200
 
         with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
@@ -109,125 +110,112 @@ class TestImperativeMnist(unittest.TestCase):
             mnist = MNIST()
             sgd = SGDOptimizer(learning_rate=1e-3)
             train_reader = paddle.batch(
-                paddle.dataset.mnist.train(), batch_size=128)
+                paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
 
             dy_param_init_value = {}
-            for batch_id, data in enumerate(train_reader()):
-                if batch_id >= batch_num:
-                    break
-                dy_x_data = np.array(
-                    [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
-                y_data = np.array(
-                    [x[1] for x in data]).astype('int64').reshape(128, 1)
-
-                img = to_variable(dy_x_data)
-                label = to_variable(y_data)
-                label._stop_gradient = True
-
-                print("forward start")
-                cost = mnist(img)
-                loss = fluid.layers.cross_entropy(cost, label)
-                avg_loss = fluid.layers.mean(loss)
-                # dy_out = avg_loss._numpy()
-                print("forward end")
-
-                # if batch_id == 0:
-                #     for param in fluid.default_main_program().global_block(
-                #     ).all_parameters():
-                #         dy_param_init_value[param.name] = param._numpy()
-
-                avg_loss._backward()
-                print("backward end")
-                sgd.minimize(avg_loss)
-                print("sgd end")
-                mnist.clear_gradients()
-
-                import gc
-                for name, var in fluid.default_main_program().global_block(
-                ).vars.items():
-                    if not var.persistable:
-                        fluid.default_main_program().global_block(
-                        )._remove_var(name)
-                        # var._ivar._clear_values()
-                for op in fluid.default_main_program().global_block().ops:
-                    fluid.default_main_program().global_block()._remove_op(
-                        op.idx)
-                assert len(gc.get_referrers(avg_loss)) == 1
-                print("clear end")
-
-                print("ivar ref ", gc.get_referrers(
-                    gc.get_referrers(avg_loss._ivar)[0])[0].__class__.__name__)
-                print("ivar ref ", gc.get_referrers(
-                    gc.get_referrers(avg_loss._ivar)[1])[0].__class__.__name__)
-
-            # dy_param_value = {}
-            # for param in fluid.default_main_program().global_block(
-            # ).all_parameters():
-            #     dy_param_value[param.name] = param._numpy()
-
-        # with new_program_scope():
-        #     fluid.default_startup_program().random_seed = seed
-        #     fluid.default_main_program().random_seed = seed
-        #     exe = fluid.Executor(fluid.CPUPlace(
-        #     ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
-
-        #     mnist = MNIST()
-        #     sgd = SGDOptimizer(learning_rate=1e-3)
-        #     train_reader = paddle.batch(
-        #         paddle.dataset.mnist.train(), batch_size=128)
-        #     img = fluid.layers.data(
-        #         name='pixel', shape=[1, 28, 28], dtype='float32')
-        #     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-        #     cost = mnist(img)
-        #     loss = fluid.layers.cross_entropy(cost, label)
-        #     avg_loss = fluid.layers.mean(loss)
-        #     sgd.minimize(avg_loss)
-
-        #     # initialize params and fetch them
-        #     static_param_init_value = {}
-        #     static_param_name_list = []
-        #     for param in fluid.default_startup_program().global_block(
-        #     ).all_parameters():
-        #         static_param_name_list.append(param.name)
-
-        #     out = exe.run(fluid.default_startup_program(),
-        #                   fetch_list=static_param_name_list)
-
-        #     for i in range(len(static_param_name_list)):
-        #         static_param_init_value[static_param_name_list[i]] = out[i]
-
-        #     for batch_id, data in enumerate(train_reader()):
-        #         if batch_id >= batch_num:
-        #             break
-        #         static_x_data = np.array(
-        #             [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
-        #         y_data = np.array(
-        #             [x[1] for x in data]).astype('int64').reshape([128, 1])
-
-        #         fetch_list = [avg_loss.name]
-        #         fetch_list.extend(static_param_name_list)
-        #         out = exe.run(fluid.default_main_program(),
-        #                       feed={"pixel": static_x_data,
-        #                             "label": y_data},
-        #                       fetch_list=fetch_list)
-
-        #         static_param_value = {}
-        #         static_out = out[0]
-        #         for i in range(1, len(out)):
-        #             static_param_value[static_param_name_list[i - 1]] = out[i]
-
-        # for key, value in six.iteritems(static_param_init_value):
-        #     self.assertTrue(np.allclose(value, dy_param_init_value[key]))
-
-        # self.assertTrue(np.allclose(static_out, dy_out))
-
-        # for key, value in six.iteritems(static_param_value):
-        #     self.assertTrue(np.allclose(value, dy_param_value[key]))
+            for epoch in range(epoch_num):
+                print("epoch", epoch)
+                for batch_id, data in enumerate(train_reader()):
+                    # if batch_id >= batch_num:
+                    #     break
+                    dy_x_data = np.array(
+                        [x[0].reshape(1, 28, 28)
+                         for x in data]).astype('float32')
+                    y_data = np.array(
+                        [x[1] for x in data]).astype('int64').reshape(128, 1)
+
+                    img = to_variable(dy_x_data)
+                    label = to_variable(y_data)
+                    label._stop_gradient = True
+
+                    cost = mnist(img)
+                    loss = fluid.layers.cross_entropy(cost, label)
+                    avg_loss = fluid.layers.mean(loss)
+
+                    dy_out = avg_loss._numpy()
+
+                    if epoch == 0 and batch_id == 0:
+                        for param in fluid.default_main_program().global_block(
+                        ).all_parameters():
+                            dy_param_init_value[param.name] = param._numpy()
+
+                    avg_loss._backward()
+                    sgd.minimize(avg_loss)
+                    mnist.clear_gradients()
+
+                    fluid.default_main_program().global_block()._clear_block()
+
+                    dy_param_value = {}
+                    for param in fluid.default_main_program().global_block(
+                    ).all_parameters():
+                        dy_param_value[param.name] = param._numpy()
+
+        with new_program_scope():
+            fluid.default_startup_program().random_seed = seed
+            fluid.default_main_program().random_seed = seed
+
+            exe = fluid.Executor(fluid.CPUPlace(
+            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
+
+            mnist = MNIST()
+            sgd = SGDOptimizer(learning_rate=1e-3)
+            train_reader = paddle.batch(
+                paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
+
+            img = fluid.layers.data(
+                name='pixel', shape=[1, 28, 28], dtype='float32')
+            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            cost = mnist(img)
+            loss = fluid.layers.cross_entropy(cost, label)
+            avg_loss = fluid.layers.mean(loss)
+            sgd.minimize(avg_loss)
+
+            # initialize params and fetch them
+            static_param_init_value = {}
+            static_param_name_list = []
+            for param in fluid.default_startup_program().global_block(
+            ).all_parameters():
+                static_param_name_list.append(param.name)
+
+            out = exe.run(fluid.default_startup_program(),
+                          fetch_list=static_param_name_list)
+
+            for i in range(len(static_param_name_list)):
+                static_param_init_value[static_param_name_list[i]] = out[i]
+
+            for epoch in range(epoch_num):
+                for batch_id, data in enumerate(train_reader()):
+                    # if batch_id >= batch_num:
+                    #     break
+                    static_x_data = np.array(
+                        [x[0].reshape(1, 28, 28)
+                         for x in data]).astype('float32')
+                    y_data = np.array(
+                        [x[1] for x in data]).astype('int64').reshape([128, 1])
+
+                    fetch_list = [avg_loss.name]
+                    fetch_list.extend(static_param_name_list)
+                    out = exe.run(
+                        fluid.default_main_program(),
+                        feed={"pixel": static_x_data,
+                              "label": y_data},
+                        fetch_list=fetch_list)
+
+                    static_param_value = {}
+                    static_out = out[0]
+                    for i in range(1, len(out)):
+                        static_param_value[static_param_name_list[i - 1]] = out[i]
+
+        for key, value in six.iteritems(static_param_init_value):
+            self.assertTrue(np.allclose(value, dy_param_init_value[key]))
+
+        self.assertTrue(np.allclose(static_out, dy_out))
+
+        for key, value in six.iteritems(static_param_value):
+            self.assertTrue(np.allclose(value, dy_param_value[key]))
 
 
 if __name__ == '__main__':