BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 1c9c8e8d (unverified)
Authored July 12, 2018 by Yu Yang; committed by GitHub on July 12, 2018.
Merge pull request #12065 from reyoung/feature/hide_apis
Add deprecated annotation and hide a lot of APIs
Parents: e568acbe, ff07af8d
Showing 13 changed files with 90 additions and 48 deletions (+90 −48).
python/paddle/fluid/annotations.py                                            +38   -0
python/paddle/fluid/backward.py                                                +1   -4
python/paddle/fluid/layers/device.py                                           +3   -1
python/paddle/fluid/optimizer.py                                              +12  -12
python/paddle/fluid/tests/book/notest_understand_sentiment.py                  +2   -2
python/paddle/fluid/tests/book/test_recognize_digits.py                       +10   -8
python/paddle/fluid/tests/book/test_word2vec.py                                +2   -1
python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py   +5   -4
python/paddle/fluid/tests/unittests/test_calc_gradient.py                      +0   -2
python/paddle/fluid/tests/unittests/test_get_places_op.py                      +2   -1
python/paddle/fluid/tests/unittests/test_layers.py                             +2   -1
python/paddle/fluid/tests/unittests/test_optimizer.py                         +11  -11
python/paddle/fluid/tests/unittests/test_parallel_op.py                        +2   -1
python/paddle/fluid/annotations.py (new file, mode 100644)

```python
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import sys

__all__ = ['deprecated']


def deprecated(since, instead, extra_message=""):
    def decorator(func):
        err_msg = "API {0} is deprecated since {1}. Please use {2} instead.".format(
            func.__name__, since, instead)
        if len(extra_message) != 0:
            err_msg += "\n"
            err_msg += extra_message

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print >> sys.stderr, err_msg
            return func(*args, **kwargs)

        wrapper.__doc__ += "\n"
        wrapper.__doc__ += err_msg
        return wrapper

    return decorator
```
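For illustration, a minimal sketch of how the decorator is meant to be applied. The function `my_old_api` and its replacement name are hypothetical, not part of this commit; like the file above, the sketch assumes Python 2, where `print >> sys.stderr` is valid syntax.

```python
from paddle.fluid.annotations import deprecated


# Hypothetical API used only to demonstrate the decorator.
@deprecated(since='0.15.0', instead="my_new_api")
def my_old_api():
    """Does something the old way."""
    return 42


# Calling the wrapped function writes the deprecation notice to stderr and
# then delegates to the original implementation. The same notice is also
# appended to my_old_api.__doc__, so it shows up in help(my_old_api).
result = my_old_api()
```

Note that the decorator appends to `wrapper.__doc__`, so it assumes the decorated function has a docstring; decorating a function without one would raise a `TypeError` at decoration time.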
python/paddle/fluid/backward.py

```diff
@@ -18,10 +18,7 @@ import collections
 import copy
 import unique_name
 
-__all__ = [
-    'append_backward',
-    'calc_gradient',
-]
+__all__ = ['append_backward']
 
 
 def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
```
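Hiding `calc_gradient` from `__all__` only removes it from star-imports; a direct import keeps working, which is exactly what test_calc_gradient.py (later in this diff) relies on:

```python
# Still valid after this commit: __all__ only governs `from module import *`.
from paddle.fluid.backward import calc_gradient
```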
python/paddle/fluid/layers/device.py

```diff
@@ -18,10 +18,12 @@ All util layers.
 from layer_function_generator import autodoc
 from ..framework import unique_name
 from ..layer_helper import LayerHelper
+from ..annotations import deprecated
 
-__all__ = ['get_places']
+__all__ = []
 
 
+@deprecated(since='0.15.0', instead="ParallelExecutor")
 @autodoc()
 def get_places(device_count=None, device_type=None):
     helper = LayerHelper('get_places', **locals())
```
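With `__all__` emptied, `get_places` is no longer re-exported through `fluid.layers`, so every caller in the test suite below switches to importing it from its defining module. A sketch of the new call site (the `device_count=4` argument mirrors the test_layers.py change further down):

```python
from paddle.fluid.layers.device import get_places

# Calling it now also emits the deprecation notice recommending
# ParallelExecutor to stderr before returning the places variable as before.
places = get_places(device_count=4)
```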
python/paddle/fluid/optimizer.py

```diff
@@ -29,7 +29,7 @@ __all__ = [
     'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',
     'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',
     'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'RMSPropOptimizer',
-    'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'Optimizer', 'RMSPropOptimizer'
+    'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'RMSPropOptimizer'
 ]
@@ -67,7 +67,7 @@ class Optimizer(object):
         self._LARS_weight_decay = LARS_weight_decay
 
     def _create_global_learning_rate(self):
-        lr = self.global_learning_rate()
+        lr = self._global_learning_rate()
 
         if isinstance(lr, framework.Variable):
             return
@@ -86,7 +86,7 @@ class Optimizer(object):
                 dtype='float32' if self._dtype == None else self._dtype,
                 persistable=True)
 
-    def global_learning_rate(self, program=None):
+    def _global_learning_rate(self, program=None):
         """
         get global decayed learning rate
         :return:
@@ -110,9 +110,9 @@ class Optimizer(object):
             return param_lr
         else:
             if param_lr == 1.0:
-                return self.global_learning_rate()
+                return self._global_learning_rate()
             else:
-                return self.global_learning_rate() * param_lr
+                return self._global_learning_rate() * param_lr
 
     def _create_accumulators(self, block, parameters):
         """Create all accumulators needed by the parameters
@@ -185,7 +185,7 @@ class Optimizer(object):
                 format(name, param.name))
         return self._accumulators[name][param.name]
 
-    def create_optimization_pass(self,
+    def _create_optimization_pass(self,
                                  parameters_and_grads,
                                  loss,
                                  startup_program=None):
@@ -221,7 +221,7 @@ class Optimizer(object):
         self._create_global_learning_rate()
         if self._LARS_weight_decay > 0.0:
             layers.append_LARS(parameters_and_grads,
-                               self.global_learning_rate(),
+                               self._global_learning_rate(),
                                self._LARS_weight_decay)
 
         optimize_ops = []
@@ -262,7 +262,7 @@ class Optimizer(object):
         params_grads = append_regularization_ops(params_grads,
                                                  self.regularization)
-        optimize_ops = self.create_optimization_pass(params_grads, loss,
+        optimize_ops = self._create_optimization_pass(params_grads, loss,
                                                      startup_program)
         return optimize_ops, params_grads
```
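The renames mark every helper on `Optimizer` as internal with a leading underscore; the supported public surface is unchanged. A minimal sketch against the fluid API of this era, showing that user code keeps going through `minimize()`, which now delegates to `_create_optimization_pass()` internally (the small network is illustrative, not from this commit):

```python
import paddle.fluid as fluid

# A one-neuron regression network, just enough to produce a loss Variable.
x = fluid.layers.data(name='x', shape=[1], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
avg_cost = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))

# The public entry point is unchanged by this commit; only the helpers it
# calls (e.g. _create_optimization_pass, _global_learning_rate) were renamed.
sgd = fluid.optimizer.SGD(learning_rate=0.01)
sgd.minimize(avg_cost)
```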
python/paddle/fluid/tests/book/notest_understand_sentiment.py

```diff
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import print_function
+from paddle.fluid.layers.device import get_places
 import unittest
 import paddle.fluid as fluid
 import paddle
@@ -144,7 +144,7 @@ def train(word_dict,
         cost, acc_out, prediction = net_method(
             data, label, input_dim=dict_dim, class_dim=class_dim)
     else:
-        places = fluid.layers.get_places()
+        places = get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             cost, acc, _ = net_method(
```
python/paddle/fluid/tests/book/test_recognize_digits.py

```diff
@@ -12,15 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import print_function
-import argparse
-import paddle.fluid as fluid
-import paddle
-import sys
-import numpy
-import unittest
 import math
-import sys
 import os
+import sys
+import unittest
+
+import numpy
+
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 
 BATCH_SIZE = 64
@@ -76,7 +78,7 @@ def train(nn_type,
         net_conf = conv_net
 
     if parallel:
-        places = fluid.layers.get_places()
+        places = get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             img_ = pd.read_input(img)
```
python/paddle/fluid/tests/book/test_word2vec.py

```diff
@@ -14,6 +14,7 @@
 import paddle
 import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 import unittest
 import os
 import numpy as np
@@ -80,7 +81,7 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True):
         avg_cost, predict_word = __network__(
             [first_word, second_word, third_word, forth_word, next_word])
     else:
-        places = fluid.layers.get_places()
+        places = get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             avg_cost, predict_word = __network__(
```
python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py

```diff
@@ -12,12 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import numpy as np
-import paddle
-import paddle.fluid as fluid
 import math
 import sys
+
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 
 # need to fix random seed and training data to compare the loss
 # value accurately calculated by the default and the memory optimization
 # version.
@@ -34,7 +35,7 @@ if fluid.core.is_compiled_with_cuda():
     use_nccl = False
     place = fluid.CUDAPlace(0)
 
-places = fluid.layers.get_places(device_count=0, device_type=device_type)
+places = get_places(device_count=0, device_type=device_type)
 pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
 with pd.do():
     x_ = pd.read_input(x)
```
python/paddle/fluid/tests/unittests/test_calc_gradient.py

```diff
@@ -16,8 +16,6 @@ import unittest
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
-import paddle.fluid.framework as framework
-import paddle.fluid.optimizer as optimizer
 from paddle.fluid.backward import calc_gradient
```
python/paddle/fluid/tests/unittests/test_get_places_op.py

```diff
@@ -13,6 +13,7 @@
 # limitations under the License.
 import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 import decorators
 import unittest
@@ -20,7 +21,7 @@ import unittest
 class TestGetPlaces(unittest.TestCase):
     @decorators.prog_scope()
     def test_get_places(self):
-        places = fluid.layers.get_places()
+        places = get_places()
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
         exe.run(fluid.default_main_program())
```
python/paddle/fluid/tests/unittests/test_layers.py

```diff
@@ -16,6 +16,7 @@ from __future__ import print_function
 import unittest
 import paddle.fluid.layers as layers
+from paddle.fluid.layers.device import get_places
 import paddle.fluid.nets as nets
 from paddle.fluid.framework import Program, program_guard, default_main_program
 from paddle.fluid.param_attr import ParamAttr
@@ -238,7 +239,7 @@ class TestBook(unittest.TestCase):
     def test_get_places(self):
         program = Program()
         with program_guard(program):
-            x = layers.get_places(device_count=4)
+            x = get_places(device_count=4)
             self.assertIsNotNone(x)
         print(str(program))
```
python/paddle/fluid/tests/unittests/test_optimizer.py

```diff
@@ -97,7 +97,7 @@ class TestMomentumOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
-        opts = momentum_optimizer.create_optimization_pass(
+        opts = momentum_optimizer._create_optimization_pass(
             params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 3)
         sgd_op = opts[-1]
@@ -151,7 +151,7 @@ class TestMomentumOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(momentum_optimizer.get_accumulators()), 0)
-        opts = momentum_optimizer.create_optimization_pass(
+        opts = momentum_optimizer._create_optimization_pass(
             params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 3)
         sgd_op = opts[-1]
@@ -214,8 +214,8 @@ class TestAdagradOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0)
-        opts = adagrad_optimizer.create_optimization_pass(params_grads, mul_out,
-                                                          init_program)
+        opts = adagrad_optimizer._create_optimization_pass(
+            params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 3)
         self.assertEqual([op.type for op in opts],
                          ["fill_constant", "elementwise_mul", "adagrad"])
@@ -278,7 +278,7 @@ class TestAdamOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adam_optimizer.get_accumulators()), 0)
-        opts = adam_optimizer.create_optimization_pass(params_grads, mul_out,
-                                                       init_program)
+        opts = adam_optimizer._create_optimization_pass(params_grads, mul_out,
+                                                        init_program)
         self.assertEqual(len(opts), 5)
         self.assertEqual(
@@ -345,7 +345,7 @@ class TestAdamaxOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(adamax_optimizer.get_accumulators()), 0)
-        opts = adamax_optimizer.create_optimization_pass(params_grads, mul_out,
-                                                         init_program)
+        opts = adamax_optimizer._create_optimization_pass(params_grads, mul_out,
+                                                          init_program)
         self.assertEqual(len(opts), 4)
         self.assertEqual(
@@ -409,7 +409,7 @@ class TestDecayedAdagradOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0)
-        opts = decayed_adagrad_optimizer.create_optimization_pass(
+        opts = decayed_adagrad_optimizer._create_optimization_pass(
             params_grads, mul_out, init_program)
         self.assertEqual(len(opts), 3)
         self.assertEqual(
@@ -475,7 +475,7 @@ class TestFtrlOptimizer(unittest.TestCase):
         params_grads = append_backward(mean_out)
         self.assertEqual(len(params_grads), 1)
         self.assertEqual(len(ftrl_optimizer.get_accumulators()), 0)
-        opts = ftrl_optimizer.create_optimization_pass(params_grads, mul_out,
-                                                       init_program)
+        opts = ftrl_optimizer._create_optimization_pass(params_grads, mul_out,
+                                                        init_program)
         self.assertEqual(len(opts), 3)
         self.assertEqual([op.type for op in opts],
```
python/paddle/fluid/tests/unittests/test_parallel_op.py

```diff
@@ -15,6 +15,7 @@
 import unittest
 import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 import paddle.fluid.profiler as profiler
 import numpy
@@ -115,7 +116,7 @@ class BaseParallelForTest(unittest.TestCase):
         if use_parallel:
             thread_num = fluid.core.get_cuda_device_count(
             ) if use_gpu else 8
-            places = fluid.layers.get_places(thread_num)
+            places = get_places(thread_num)
             pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
             data = next(generator)
```