机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 91f0573b
Authored on July 26, 2018 by minqiyang
Parent commit: 559d3632

Fix the overfix of 2to3 for print function

32 changed files with 156 additions and 155 deletions (+156, -155)
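For context: in Python 2 the statement form `print("msg", value)` prints a tuple, so 2to3's print fixer preserves that behaviour by wrapping the arguments in an extra pair of parentheses, producing `print(("msg", value))`. For code that was already written in call style and simply meant "print these values", that extra wrapping is the over-fix this commit removes. A minimal sketch of the difference under Python 3 (the values are made up for illustration):

    results = [0.25, 0.75]

    # What 2to3 emitted (the over-fix): a single tuple argument, shown via repr()
    print(("infer results: ", results))   # -> ('infer results: ', [0.25, 0.75])

    # What this commit changes it to: two arguments, printed space-separated
    print("infer results: ", results)     # -> infer results:  [0.25, 0.75]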
CMakeLists.txt  +1 -0
python/paddle/fluid/optimizer.py  +1 -1
python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py  +1 -1
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py  +2 -2
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py  +2 -2
python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py  +10 -10
python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py  +2 -2
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py  +5 -5
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py  +5 -5
python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py  +2 -2
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py  +13 -13
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py  +13 -13
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py  +13 -13
python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py  +2 -2
python/paddle/fluid/tests/book/notest_understand_sentiment.py  +10 -10
python/paddle/fluid/tests/book/test_fit_a_line.py  +3 -3
python/paddle/fluid/tests/book/test_image_classification.py  +4 -4
python/paddle/fluid/tests/book/test_label_semantic_roles.py  +11 -11
python/paddle/fluid/tests/book/test_machine_translation.py  +3 -3
python/paddle/fluid/tests/book/test_recognize_digits.py  +3 -3
python/paddle/fluid/tests/book/test_recommender_system.py  +7 -7
python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py  +11 -11
python/paddle/fluid/tests/book/test_word2vec.py  +7 -7
python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py  +1 -1
python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py  +2 -2
python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py  +2 -2
python/paddle/fluid/tests/demo/fc_gan.py  +2 -2
python/paddle/fluid/tests/test_detection.py  +4 -4
python/paddle/fluid/tests/test_if_else_op.py  +2 -2
python/paddle/fluid/tests/unittests/benchmark.py  +4 -4
python/paddle/fluid/tests/unittests/parallel_executor_test_base.py  +3 -3
python/paddle/fluid/transpiler/memory_optimization_transpiler.py  +5 -5
CMakeLists.txt

@@ -72,6 +72,7 @@ option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VER
 if(NOT PY_VERSION)
   set(PY_VERSION 2.7)
 endif()
+set(PYBIND11_PYTHON_VERSION ${PY_VERSION})
 # CMAKE_BUILD_TYPE
 if(NOT CMAKE_BUILD_TYPE)
python/paddle/fluid/optimizer.py

@@ -106,7 +106,7 @@ class Optimizer(object):
         param_lr = param.optimize_attr['learning_rate']
         if type(param_lr) == Variable:
             # param learning rate has been updated (LARS)
-            print(("returns updated param lr ", param_lr))
+            print("returns updated param lr ", param_lr)
             return param_lr
         else:
             if param_lr == 1.0:
python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py

@@ -94,7 +94,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
     results = inferencer.infer({'x': tensor_x})
-    print(("infer results: ", results[0]))
+    print("infer results: ", results[0])


 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py

@@ -105,7 +105,7 @@ def train(use_cuda, train_program, params_dirname):
             avg_cost, accuracy = trainer.test(
                 reader=test_reader, feed_order=['pixel', 'label'])
-            print(('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)))
+            print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))
             if accuracy > 0.01:  # Low threshold for speeding up CI
                 if params_dirname is not None:

@@ -134,7 +134,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
     results = inferencer.infer({'pixel': tensor_img})
-    print(("infer results: ", results))
+    print("infer results: ", results)


 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py

@@ -82,7 +82,7 @@ def train(use_cuda, train_program, params_dirname):
             avg_cost, accuracy = trainer.test(
                 reader=test_reader, feed_order=['pixel', 'label'])
-            print(('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)))
+            print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))
             if accuracy > 0.01:  # Low threshold for speeding up CI
                 if params_dirname is not None:

@@ -111,7 +111,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
     results = inferencer.infer({'pixel': tensor_img})
-    print(("infer results: ", results))
+    print("infer results: ", results)


 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py

@@ -171,7 +171,7 @@ def train(use_cuda, train_program, params_dirname):
             # get avg cost
             avg_cost = np.array(avg_cost_set).mean()
-            print(("avg_cost: %s" % avg_cost))
+            print("avg_cost: %s" % avg_cost)
             if float(avg_cost) < 100.0:  # Large value to increase CI speed
                 trainer.save_params(params_dirname)

@@ -183,8 +183,8 @@ def train(use_cuda, train_program, params_dirname):
                 sys.exit("got NaN loss, training failed.")
         elif isinstance(event, fluid.EndStepEvent):
-            print(("Step {0}, Epoch {1} Metrics {2}".format(
-                event.step, event.epoch, list(map(np.array, event.metrics)))))
+            print("Step {0}, Epoch {1} Metrics {2}".format(
+                event.step, event.epoch, list(map(np.array, event.metrics))))
             if event.step == 1:  # Run 2 iterations to speed CI
                 trainer.save_params(params_dirname)
                 trainer.stop()

@@ -206,14 +206,14 @@ def infer(use_cuda, inference_program, params_dirname):
         inference_program, param_path=params_dirname, place=place)

     # Setup input by creating LoDTensor to represent sequence of words.
     # Here each word is the basic element of the LoDTensor and the shape of
     # each word (base_shape) should be [1] since it is simply an index to
     # look up for the corresponding word vector.
     # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
     # which has only one level of detail. Then the created LoDTensor will have only
     # one higher level structure (sequence of words, or sentence) than the basic
     # element (word). Hence the LoDTensor will hold data for three sentences of
     # length 3, 4 and 2, respectively.
     # Note that recursive_sequence_lengths should be a list of lists.
     recursive_seq_lens = [[3, 4, 2]]
     base_shape = [1]

@@ -248,7 +248,7 @@ def infer(use_cuda, inference_program, params_dirname):
         },
         return_numpy=False)
-    print(("infer results: ", np.array(results[0]).shape))
+    print("infer results: ", np.array(results[0]).shape)


 def main(use_cuda):
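The comment block carried as context in the hunk above describes how a length-based LoD level such as recursive_seq_lens = [[3, 4, 2]] splits the flat data of a LoDTensor into three sentences of lengths 3, 4 and 2. A plain-Python sketch of that bookkeeping (no Paddle required; the word ids below are made up for illustration):

    word_ids = [4, 7, 1, 9, 2, 8, 3, 5, 6]   # flat data; each word is one element of shape [1]
    recursive_seq_lens = [[3, 4, 2]]         # one level of detail: the sentence lengths

    # Turn the lengths into cumulative offsets: [0, 3, 7, 9]
    offsets = [0]
    for length in recursive_seq_lens[0]:
        offsets.append(offsets[-1] + length)

    # Recover the per-sentence view of the flat data
    sentences = [word_ids[offsets[i]:offsets[i + 1]] for i in range(len(offsets) - 1)]
    print(sentences)   # [[4, 7, 1], [9, 2, 8, 3], [5, 6]]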
python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py

@@ -197,7 +197,7 @@ def train(use_cuda, is_sparse, is_local=True):
     def event_handler(event):
         if isinstance(event, fluid.EndStepEvent):
-            print(('pass_id=' + str(event.epoch) + ' batch=' + str(event.step)))
+            print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))
             if event.step == 10:
                 trainer.stop()

@@ -259,7 +259,7 @@ def decode_main(use_cuda, is_sparse):
             feed=feed_dict,
             fetch_list=[translation_ids, translation_scores],
             return_numpy=False)
-        print((result_ids.recursive_sequence_lengths()))
+        print(result_ids.recursive_sequence_lengths())
         break
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py

@@ -78,14 +78,14 @@ def train(use_cuda, train_program, params_dirname):
             avg_cost, acc = trainer.test(
                 reader=test_reader, feed_order=['img', 'label'])
-            print(("avg_cost: %s" % avg_cost))
-            print(("acc : %s" % acc))
+            print("avg_cost: %s" % avg_cost)
+            print("acc : %s" % acc)
             if acc > 0.2:  # Smaller value to increase CI speed
                 trainer.save_params(params_dirname)
             else:
-                print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-                    event.epoch + 1, avg_cost, acc)))
+                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+                    event.epoch + 1, avg_cost, acc))
                 if math.isnan(avg_cost):
                     sys.exit("got NaN loss, training failed.")
         elif isinstance(event, fluid.EndStepEvent):

@@ -118,7 +118,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     results = inferencer.infer({'img': tensor_img})
-    print(("infer results: ", results[0]))
+    print("infer results: ", results[0])


 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py

@@ -61,14 +61,14 @@ def train(use_cuda, train_program, params_dirname):
             avg_cost, acc = trainer.test(
                 reader=test_reader, feed_order=['img', 'label'])
-            print(("avg_cost: %s" % avg_cost))
-            print(("acc : %s" % acc))
+            print("avg_cost: %s" % avg_cost)
+            print("acc : %s" % acc)
             if acc > 0.2:  # Smaller value to increase CI speed
                 trainer.save_params(params_dirname)
             else:
-                print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-                    event.epoch + 1, avg_cost, acc)))
+                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+                    event.epoch + 1, avg_cost, acc))
                 if math.isnan(avg_cost):
                     sys.exit("got NaN loss, training failed.")

@@ -96,7 +96,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     results = inferencer.infer({'img': tensor_img})
-    print(("infer results: ", results[0]))
+    print("infer results: ", results[0])


 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py

@@ -180,7 +180,7 @@ def train(use_cuda, train_program, params_dirname):
             # get avg cost
             avg_cost = np.array(avg_cost_set).mean()
-            print(("avg_cost: %s" % avg_cost))
+            print("avg_cost: %s" % avg_cost)
             if float(avg_cost) < 4:  # Smaller value to increase CI speed
                 trainer.save_params(params_dirname)

@@ -240,7 +240,7 @@ def infer(use_cuda, inference_program, params_dirname):
         },
         return_numpy=False)
-    print(("infer results: ", np.array(results[0])))
+    print("infer results: ", np.array(results[0]))


 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py

@@ -82,21 +82,21 @@ def train(use_cuda, train_program, params_dirname):
             avg_cost, acc = trainer.test(
                 reader=test_reader, feed_order=['words', 'label'])
-            print(("avg_cost: %s" % avg_cost))
-            print(("acc : %s" % acc))
+            print("avg_cost: %s" % avg_cost)
+            print("acc : %s" % acc)
             if acc > 0.2:  # Smaller value to increase CI speed
                 trainer.save_params(params_dirname)
                 trainer.stop()
             else:
-                print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-                    event.epoch + 1, avg_cost, acc)))
+                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+                    event.epoch + 1, avg_cost, acc))
                 if math.isnan(avg_cost):
                     sys.exit("got NaN loss, training failed.")
         elif isinstance(event, fluid.EndStepEvent):
-            print(("Step {0}, Epoch {1} Metrics {2}".format(
-                event.step, event.epoch, list(map(np.array, event.metrics)))))
+            print("Step {0}, Epoch {1} Metrics {2}".format(
+                event.step, event.epoch, list(map(np.array, event.metrics))))
             if event.step == 1:  # Run 2 iterations to speed CI
                 trainer.save_params(params_dirname)
                 trainer.stop()

@@ -123,14 +123,14 @@ def infer(use_cuda, inference_program, params_dirname=None):
         place=place)

     # Setup input by creating LoDTensor to represent sequence of words.
     # Here each word is the basic element of the LoDTensor and the shape of
     # each word (base_shape) should be [1] since it is simply an index to
     # look up for the corresponding word vector.
     # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
     # which has only one level of detail. Then the created LoDTensor will have only
     # one higher level structure (sequence of words, or sentence) than the basic
     # element (word). Hence the LoDTensor will hold data for three sentences of
     # length 3, 4 and 2, respectively.
     # Note that recursive_sequence_lengths should be a list of lists.
     recursive_seq_lens = [[3, 4, 2]]
     base_shape = [1]

@@ -138,7 +138,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     tensor_words = fluid.create_random_int_lodtensor(
         recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
     results = inferencer.infer({'words': tensor_words})
-    print(("infer results: ", results))
+    print("infer results: ", results)


 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py

@@ -97,21 +97,21 @@ def train(use_cuda, train_program, params_dirname):
             avg_cost, acc = trainer.test(
                 reader=test_reader, feed_order=['words', 'label'])
-            print(("avg_cost: %s" % avg_cost))
-            print(("acc : %s" % acc))
+            print("avg_cost: %s" % avg_cost)
+            print("acc : %s" % acc)
             if acc > 0.2:  # Smaller value to increase CI speed
                 trainer.save_params(params_dirname)
                 trainer.stop()
             else:
-                print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-                    event.epoch + 1, avg_cost, acc)))
+                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+                    event.epoch + 1, avg_cost, acc))
                 if math.isnan(avg_cost):
                     sys.exit("got NaN loss, training failed.")
         elif isinstance(event, fluid.EndStepEvent):
-            print(("Step {0}, Epoch {1} Metrics {2}".format(
-                event.step, event.epoch, list(map(np.array, event.metrics)))))
+            print("Step {0}, Epoch {1} Metrics {2}".format(
+                event.step, event.epoch, list(map(np.array, event.metrics))))
             if event.step == 1:  # Run 2 iterations to speed CI
                 trainer.save_params(params_dirname)
                 trainer.stop()

@@ -138,14 +138,14 @@ def infer(use_cuda, inference_program, params_dirname=None):
         place=place)

     # Setup input by creating LoDTensor to represent sequence of words.
     # Here each word is the basic element of the LoDTensor and the shape of
     # each word (base_shape) should be [1] since it is simply an index to
     # look up for the corresponding word vector.
     # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
     # which has only one level of detail. Then the created LoDTensor will have only
     # one higher level structure (sequence of words, or sentence) than the basic
     # element (word). Hence the LoDTensor will hold data for three sentences of
     # length 3, 4 and 2, respectively.
     # Note that recursive_sequence_lengths should be a list of lists.
     recursive_seq_lens = [[3, 4, 2]]
     base_shape = [1]

@@ -153,7 +153,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     tensor_words = fluid.create_random_int_lodtensor(
         recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
     results = inferencer.infer({'words': tensor_words})
-    print(("infer results: ", results))
+    print("infer results: ", results)


 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py

@@ -91,21 +91,21 @@ def train(use_cuda, train_program, params_dirname):
             avg_cost, acc = trainer.test(
                 reader=test_reader, feed_order=['words', 'label'])
-            print(("avg_cost: %s" % avg_cost))
-            print(("acc : %s" % acc))
+            print("avg_cost: %s" % avg_cost)
+            print("acc : %s" % acc)
             if acc > 0.2:  # Smaller value to increase CI speed
                 trainer.save_params(params_dirname)
                 trainer.stop()
             else:
-                print(('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
-                    event.epoch + 1, avg_cost, acc)))
+                print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+                    event.epoch + 1, avg_cost, acc))
                 if math.isnan(avg_cost):
                     sys.exit("got NaN loss, training failed.")
         elif isinstance(event, fluid.EndStepEvent):
-            print(("Step {0}, Epoch {1} Metrics {2}".format(
-                event.step, event.epoch, list(map(np.array, event.metrics)))))
+            print("Step {0}, Epoch {1} Metrics {2}".format(
+                event.step, event.epoch, list(map(np.array, event.metrics))))
             if event.step == 1:  # Run 2 iterations to speed CI
                 trainer.save_params(params_dirname)
                 trainer.stop()

@@ -133,14 +133,14 @@ def infer(use_cuda, inference_program, params_dirname=None):
         place=place)

     # Setup input by creating LoDTensor to represent sequence of words.
     # Here each word is the basic element of the LoDTensor and the shape of
     # each word (base_shape) should be [1] since it is simply an index to
     # look up for the corresponding word vector.
     # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
     # which has only one level of detail. Then the created LoDTensor will have only
     # one higher level structure (sequence of words, or sentence) than the basic
     # element (word). Hence the LoDTensor will hold data for three sentences of
     # length 3, 4 and 2, respectively.
     # Note that recursive_sequence_lengths should be a list of lists.
     recursive_seq_lens = [[3, 4, 2]]
     base_shape = [1]

@@ -148,7 +148,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
     tensor_words = fluid.create_random_int_lodtensor(
         recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
     results = inferencer.infer({'words': tensor_words})
-    print(("infer results: ", results))
+    print("infer results: ", results)


 def main(use_cuda):
python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py

@@ -98,7 +98,7 @@ def train(use_cuda, train_program, params_dirname):
                 reader=test_reader,
                 feed_order=['firstw', 'secondw', 'thirdw', 'forthw', 'nextw'])
             avg_cost = outs[0]
-            print(("loss= ", avg_cost))
+            print("loss= ", avg_cost)
             if avg_cost < 10.0:
                 trainer.save_params(params_dirname)

@@ -149,7 +149,7 @@ def infer(use_cuda, inference_program, params_dirname=None):
             'forthw': fourth_word
         },
         return_numpy=False)
-    print((np.array(result[0])))
+    print(np.array(result[0]))


 def main(use_cuda, is_sparse):
python/paddle/fluid/tests/book/notest_understand_sentiment.py

@@ -180,7 +180,7 @@ def train(word_dict,
                 cost_val, acc_val = exe.run(main_program,
                                             feed=feeder.feed(data),
                                             fetch_list=[cost, acc_out])
-                print(("cost=" + str(cost_val) + " acc=" + str(acc_val)))
+                print("cost=" + str(cost_val) + " acc=" + str(acc_val))
                 if cost_val < 0.4 and acc_val > 0.8:
                     if save_dirname is not None:
                         fluid.io.save_inference_model(save_dirname, ["words"],

@@ -235,14 +235,14 @@ def infer(word_dict, use_cuda, save_dirname=None):
         word_dict_len = len(word_dict)

         # Setup input by creating LoDTensor to represent sequence of words.
         # Here each word is the basic element of the LoDTensor and the shape of
         # each word (base_shape) should be [1] since it is simply an index to
         # look up for the corresponding word vector.
         # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
         # which has only one level of detail. Then the created LoDTensor will have only
         # one higher level structure (sequence of words, or sentence) than the basic
         # element (word). Hence the LoDTensor will hold data for three sentences of
         # length 3, 4 and 2, respectively.
         # Note that recursive_sequence_lengths should be a list of lists.
         recursive_seq_lens = [[3, 4, 2]]
         base_shape = [1]

@@ -261,10 +261,10 @@ def infer(word_dict, use_cuda, save_dirname=None):
                           feed={feed_target_names[0]: tensor_words},
                           fetch_list=fetch_targets,
                           return_numpy=False)
-        print((results[0].recursive_sequence_lengths()))
+        print(results[0].recursive_sequence_lengths())
         np_data = np.array(results[0])
-        print(("Inference Shape: ", np_data.shape))
-        print(("Inference results: ", np_data))
+        print("Inference Shape: ", np_data.shape)
+        print("Inference results: ", np_data)


 def main(word_dict, net_method, use_cuda, parallel=False, save_dirname=None):
python/paddle/fluid/tests/book/test_fit_a_line.py

@@ -124,9 +124,9 @@ def infer(use_cuda, save_dirname=None):
         results = exe.run(inference_program,
                           feed={feed_target_names[0]: numpy.array(test_feat)},
                           fetch_list=fetch_targets)
-        print(("infer shape: ", results[0].shape))
-        print(("infer results: ", results[0]))
-        print(("ground truth: ", test_label))
+        print("infer shape: ", results[0].shape)
+        print("infer results: ", results[0])
+        print("ground truth: ", test_label)


 def main(use_cuda, is_local=True):
python/paddle/fluid/tests/book/test_image_classification.py

@@ -119,7 +119,7 @@ def train(net_type, use_cuda, save_dirname, is_local):
     avg_cost = fluid.layers.mean(cost)
     acc = fluid.layers.accuracy(input=predict, label=label)

     # Test program
     test_program = fluid.default_main_program().clone(for_test=True)

     optimizer = fluid.optimizer.Adam(learning_rate=0.001)

@@ -163,10 +163,10 @@ def train(net_type, use_cuda, save_dirname, is_local):
                 acc_value = numpy.array(acc_list).mean()
                 avg_loss_value = numpy.array(avg_loss_list).mean()
-                print((
-                    'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.
-                    format(pass_id, batch_id + 1,
-                           float(avg_loss_value), float(acc_value))))
+                print(
+                    'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.
+                    format(pass_id, batch_id + 1,
+                           float(avg_loss_value), float(acc_value)))
                 if acc_value > 0.01:  # Low threshold for speeding up CI
                     fluid.io.save_inference_model(save_dirname, ["pixel"],

@@ -239,7 +239,7 @@ def infer(use_cuda, save_dirname=None):
             np.testing.assert_almost_equal(
                 results[0][i], transpiler_results[0][i], decimal=5)
-        print(("infer results: ", results[0]))
+        print("infer results: ", results[0])
         fluid.io.save_inference_model(save_dirname, feed_target_names,
                                       fetch_targets, exe,
python/paddle/fluid/tests/book/test_label_semantic_roles.py

@@ -189,10 +189,10 @@ def train(use_cuda, save_dirname=None, is_local=True):
                 cost = cost[0]
                 if batch_id % 10 == 0:
-                    print(("avg_cost:" + str(cost)))
+                    print("avg_cost:" + str(cost))
                     if batch_id != 0:
-                        print(("second per batch: " + str(
-                            (time.time() - start_time) / batch_id)))
+                        print("second per batch: " + str((time.time(
+                        ) - start_time) / batch_id))
                     # Set the threshold low to speed up the CI test
                     if float(cost) < 60.0:
                         if save_dirname is not None:

@@ -248,14 +248,14 @@ def infer(use_cuda, save_dirname=None):
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

         # Setup input by creating LoDTensor to represent sequence of words.
         # Here each word is the basic element of the LoDTensor and the shape of
         # each word (base_shape) should be [1] since it is simply an index to
         # look up for the corresponding word vector.
         # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
         # which has only one level of detail. Then the created LoDTensor will have only
         # one higher level structure (sequence of words, or sentence) than the basic
         # element (word). Hence the LoDTensor will hold data for three sentences of
         # length 3, 4 and 2, respectively.
         # Note that recursive_sequence_lengths should be a list of lists.
         recursive_seq_lens = [[3, 4, 2]]
         base_shape = [1]

@@ -333,9 +333,9 @@ def infer(use_cuda, save_dirname=None):
             },
             fetch_list=fetch_targets,
             return_numpy=False)
-        print((results[0].recursive_sequence_lengths()))
+        print(results[0].recursive_sequence_lengths())
         np_data = np.array(results[0])
-        print(("Inference Shape: ", np_data.shape))
+        print("Inference Shape: ", np_data.shape)


 def main(use_cuda, is_local=True):
python/paddle/fluid/tests/book/test_machine_translation.py

@@ -205,8 +205,8 @@ def train_main(use_cuda, is_sparse, is_local=True):
                               feed=feeder.feed(data),
                               fetch_list=[avg_cost])
             avg_cost_val = np.array(outs[0])
-            print(('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
-                   " avg_cost=" + str(avg_cost_val)))
+            print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
+                  " avg_cost=" + str(avg_cost_val))
             if batch_id > 3:
                 break
             batch_id += 1

@@ -282,7 +282,7 @@ def decode_main(use_cuda, is_sparse):
             feed=feed_dict,
             fetch_list=[translation_ids, translation_scores],
             return_numpy=False)
-        print((result_ids.recursive_sequence_lengths()))
+        print(result_ids.recursive_sequence_lengths())
         break
python/paddle/fluid/tests/book/test_recognize_digits.py

@@ -142,10 +142,10 @@ def train(nn_type,
                         params_filename=params_filename)
                 return
             else:
-                print((
-                    'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.
-                    format(pass_id, batch_id + 1,
-                           float(avg_loss_val), float(acc_val))))
+                print(
+                    'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.
+                    format(pass_id, batch_id + 1,
+                           float(avg_loss_val), float(acc_val)))
                 if math.isnan(float(avg_loss_val)):
                     sys.exit("got NaN loss, training failed.")
     raise AssertionError("Loss of recognize digits is too large")

@@ -206,7 +206,7 @@ def infer(use_cuda,
         results = exe.run(inference_program,
                           feed={feed_target_names[0]: tensor_img},
                           fetch_list=fetch_targets)
-        print(("infer results: ", results[0]))
+        print("infer results: ", results[0])


 def main(use_cuda, parallel, nn_type, combine):
python/paddle/fluid/tests/book/test_recommender_system.py

@@ -260,15 +260,15 @@ def infer(use_cuda, save_dirname=None):
         # Use the first data from paddle.dataset.movielens.test() as input
         assert feed_target_names[0] == "user_id"
         # Use create_lod_tensor(data, recursive_sequence_lengths, place) API
         # to generate LoD Tensor where `data` is a list of sequences of index
         # numbers, `recursive_sequence_lengths` is the length-based level of detail
         # (lod) info associated with `data`.
         # For example, data = [[10, 2, 3], [2, 3]] means that it contains
         # two sequences of indexes, of length 3 and 2, respectively.
         # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
         # level of detail info, indicating that `data` consists of two sequences
         # of length 3 and 2, respectively.
         user_id = fluid.create_lod_tensor([[1]], [[1]], place)

         assert feed_target_names[1] == "gender_id"

@@ -304,7 +304,7 @@ def infer(use_cuda, save_dirname=None):
             },
             fetch_list=fetch_targets,
             return_numpy=False)
-        print(("inferred score: ", np.array(results[0])))
+        print("inferred score: ", np.array(results[0]))


 def main(use_cuda):
python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py

@@ -182,8 +182,8 @@ def train(use_cuda, save_dirname=None):
                               fetch_list=[avg_cost])
             avg_cost_val = np.array(outs[0])
-            print(('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
-                   " avg_cost=" + str(avg_cost_val)))
+            print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
+                  " avg_cost=" + str(avg_cost_val))
             if math.isnan(float(avg_cost_val[0])):
                 sys.exit("got NaN loss, training failed.")
             if batch_id > 3:

@@ -213,14 +213,14 @@ def infer(use_cuda, save_dirname=None):
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

         # Setup input by creating LoDTensor to represent sequence of words.
         # Here each word is the basic element of the LoDTensor and the shape of
         # each word (base_shape) should be [1] since it is simply an index to
         # look up for the corresponding word vector.
         # Suppose the recursive_sequence_lengths info is set to [[4, 6]],
         # which has only one level of detail. Then the created LoDTensor will have only
         # one higher level structure (sequence of words, or sentence) than the basic
         # element (word). Hence the LoDTensor will hold data for two sentences of
         # length 4 and 6, respectively.
         # Note that recursive_sequence_lengths should be a list of lists.
         recursive_seq_lens = [[4, 6]]
         base_shape = [1]

@@ -241,10 +241,10 @@ def infer(use_cuda, save_dirname=None):
             },
             fetch_list=fetch_targets,
             return_numpy=False)
-        print((results[0].recursive_sequence_lengths()))
+        print(results[0].recursive_sequence_lengths())
         np_data = np.array(results[0])
-        print(("Inference shape: ", np_data.shape))
-        print(("Inference results: ", np_data))
+        print("Inference shape: ", np_data.shape)
+        print("Inference results: ", np_data)


 def main(use_cuda):
python/paddle/fluid/tests/book/test_word2vec.py

@@ -169,11 +169,11 @@ def infer(use_cuda, save_dirname=None):
         word_dict = paddle.dataset.imikolov.build_dict()
         dict_size = len(word_dict)

         # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
         # is simply an index to look up for the corresponding word vector and hence
         # the shape of word (base_shape) should be [1]. The recursive_sequence_lengths,
         # which is length-based level of detail (lod) of each LoDTensor, should be [[1]]
         # meaning there is only one level of detail and there is only one sequence of
         # one word on this level.
         # Note that recursive_sequence_lengths should be a list of lists.
         recursive_seq_lens = [[1]]

@@ -204,9 +204,9 @@ def infer(use_cuda, save_dirname=None):
             },
             fetch_list=fetch_targets,
             return_numpy=False)
-        print((results[0].recursive_sequence_lengths()))
+        print(results[0].recursive_sequence_lengths())
         np_data = np.array(results[0])
-        print(("Inference Shape: ", np_data.shape))
+        print("Inference Shape: ", np_data.shape)


 def main(use_cuda, is_sparse, is_parallel):
python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py

@@ -78,7 +78,7 @@ for pass_id in range(PASS_NUM):
     if avg_loss_value[0] < 10.0:
         exit(0)  # if avg cost less than 10.0, we think our code is good.
-    print((avg_loss_value[0]))
+    print(avg_loss_value[0])
     if math.isnan(float(avg_loss_value)):
         sys.exit("got NaN loss, training failed.")
 exit(1)
python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py

@@ -155,8 +155,8 @@ for pass_id in range(PASS_NUM):
                                   fetch_list=[avg_cost, batch_acc, batch_size])
         accuracy.add(value=acc, weight=weight)
         pass_acc = accuracy.eval()
-        print(("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" +
-               str(pass_acc)))
+        print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
+            pass_acc))
         # this model is slow, so if we can train two mini batch, we think it works properly.
         if i > 0:
             exit(0)
...
python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py
浏览文件 @
91f0573b
...
@@ -124,8 +124,8 @@ def main():
...
@@ -124,8 +124,8 @@ def main():
feed
=
feeder
.
feed
(
data
),
feed
=
feeder
.
feed
(
data
),
fetch_list
=
[
avg_cost
])
fetch_list
=
[
avg_cost
])
avg_cost_val
=
np
.
array
(
outs
[
0
])
avg_cost_val
=
np
.
array
(
outs
[
0
])
print
(
(
'pass_id='
+
str
(
pass_id
)
+
' batch='
+
str
(
batch_id
)
+
print
(
'pass_id='
+
str
(
pass_id
)
+
' batch='
+
str
(
batch_id
)
+
" avg_cost="
+
str
(
avg_cost_val
)
))
" avg_cost="
+
str
(
avg_cost_val
))
if
batch_id
>
2
:
if
batch_id
>
2
:
exit
(
0
)
exit
(
0
)
if
math
.
isnan
(
float
(
avg_cost_val
)):
if
math
.
isnan
(
float
(
avg_cost_val
)):
...
...
python/paddle/fluid/tests/demo/fc_gan.py

@@ -158,8 +158,8 @@ def main():
             dg_loss_np = exe.run(dg_program,
                                  feed={'noise': n},
                                  fetch_list={dg_loss})[0]
-            print(("Pass ID={0}, Batch ID={1}, D-Loss={2}, DG-Loss={3}".format(
-                pass_id, batch_id, d_loss_np, dg_loss_np)))
+            print("Pass ID={0}, Batch ID={1}, D-Loss={2}, DG-Loss={3}".format(
+                pass_id, batch_id, d_loss_np, dg_loss_np))
             # generate image each batch
             fig = plot(generated_img)
             plt.savefig(
python/paddle/fluid/tests/test_detection.py

@@ -46,7 +46,7 @@ class TestDetection(unittest.TestCase):
                 scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv)
             self.assertIsNotNone(out)
             self.assertEqual(out.shape[-1], 6)
-            print((str(program)))
+            print(str(program))

     def test_detection_api(self):
         program = Program()

@@ -81,7 +81,7 @@ class TestDetection(unittest.TestCase):
             self.assertIsNotNone(trg)
             self.assertIsNotNone(trg_weight)
-            print((str(program)))
+            print(str(program))

     def test_ssd_loss(self):
         program = Program()

@@ -105,7 +105,7 @@ class TestDetection(unittest.TestCase):
             loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
             self.assertIsNotNone(loss)
             self.assertEqual(loss.shape[-1], 1)
-            print((str(program)))
+            print(str(program))


 class TestPriorBox(unittest.TestCase):

@@ -196,7 +196,7 @@ class TestDetectionMAP(unittest.TestCase):
             map_out = layers.detection_map(detect_res, label, 21)
             self.assertIsNotNone(map_out)
             self.assertEqual(map_out.shape, (1, ))
-            print((str(program)))
+            print(str(program))


 if __name__ == '__main__':
python/paddle/fluid/tests/test_if_else_op.py

@@ -84,7 +84,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
                              feed={'x': x_data,
                                    'y': y_data},
                              fetch_list=[avg_loss])
-            print((outs[0]))
+            print(outs[0])
             if outs[0] < 1.0:
                 return
         self.assertFalse(True)

@@ -139,7 +139,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
                              feed={'x': x_data,
                                    'y': y_data},
                              fetch_list=[avg_loss])
-            print((outs[0]))
+            print(outs[0])
             if outs[0] < 1.0:
                 return
         self.assertFalse(True)
python/paddle/fluid/tests/unittests/benchmark.py

@@ -88,8 +88,8 @@ class BenchmarkSuite(OpTest):
         for place in places:
             elapses.append(self.timeit_output_with_place(place, iters))
         for place, elapse in zip(places, elapses):
-            print(("One pass of ({2}_op) at {0} cost {1}".format(
-                str(place), elapse, self.op_type)))
+            print("One pass of ({2}_op) at {0} cost {1}".format(
+                str(place), elapse, self.op_type))

     def timeit_grad_with_place(self, place, iters=100):
         inputs_to_check = self._get_input_names()

@@ -108,5 +108,5 @@ class BenchmarkSuite(OpTest):
         for place in places:
             elapses.append(self.timeit_grad_with_place(place, iters))
         for place, elapse in zip(places, elapses):
-            print(("One pass of ({2}_grad_op) at {0} cost {1}".format(
-                str(place), elapse, self.op_type)))
+            print("One pass of ({2}_grad_op) at {0} cost {1}".format(
+                str(place), elapse, self.op_type))
python/paddle/fluid/tests/unittests/parallel_executor_test_base.py

@@ -99,8 +99,8 @@ class TestParallelExecutorBase(unittest.TestCase):
         end = time.time()

         if batch_size is not None:
-            print(("%.4f Instance per second" % (
-                (batch_size * iter + 2) / (end - begin))))
+            print("%.4f Instance per second" % (
+                (batch_size * iter + 2) / (end - begin)))

         avg_last_loss_val = np.array(last_loss).mean()
         avg_first_loss_val = np.array(first_loss).mean()

@@ -108,6 +108,6 @@ class TestParallelExecutorBase(unittest.TestCase):
                 float(avg_first_loss_val)):
             sys.exit("got NaN loss, training failed.")

-        print((first_loss, last_loss))
+        print(first_loss, last_loss)
         # self.assertGreater(first_loss[0], last_loss[0])
         return first_loss, last_loss
python/paddle/fluid/transpiler/memory_optimization_transpiler.py

@@ -246,11 +246,11 @@ class ControlFlowGraph(object):
                     continue
                 if PRINT_LOG:
-                    print((("Hit Cache !!!! cache pool index "
-                            "is %d, var name is %s, "
-                            "cached var name is %s, "
-                            "var shape is %s ") % (index, x, cache_var,
-                                                   str(cache_shape))))
+                    print(("Hit Cache !!!! cache pool index "
+                           "is %d, var name is %s, "
+                           "cached var name is %s, "
+                           "var shape is %s ") % (index, x, cache_var,
+                                                  str(cache_shape)))
                 self.pool.pop(index)
                 if x == cache_var:
                     break
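Note that only the outermost, tuple-wrapping pair of parentheses is dropped in this last hunk. The inner pair is still required: it groups the implicitly concatenated string literals so that the % operator formats the whole message rather than just the final fragment. A small self-contained sketch with made-up values:

    index, x, cache_var, cache_shape = 3, "fc_0.tmp_1", "fc_0.tmp_0", [32, 128]
    print(("Hit Cache !!!! cache pool index "
           "is %d, var name is %s, "
           "cached var name is %s, "
           "var shape is %s ") % (index, x, cache_var, str(cache_shape)))
    # -> Hit Cache !!!! cache pool index is 3, var name is fc_0.tmp_1, cached var name is fc_0.tmp_0, var shape is [32, 128]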