Crayon鑫 / Paddle, forked from PaddlePaddle / Paddle (in sync with the fork source)
Commit 27cf7f33
Authored Nov 27, 2017 by sweetsky0901

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into my_unpool_max_2d

Parents: cfd7721b, a619695b
Showing 9 changed files with 146 additions and 222 deletions (+146 −222)
cmake/external/gflags.cmake (+2 −9)
paddle/operators/math/selected_rows_functor.cu (+0 −1)
python/paddle/v2/fluid/evaluator.py (+96 −151)
python/paddle/v2/fluid/layers.py (+30 −8)
python/paddle/v2/fluid/tests/book/test_image_classification_train.py (+5 −39)
python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py (+2 −2)
python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py (+5 −6)
python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py (+3 −3)
python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py (+3 −3)
cmake/external/gflags.cmake
@@ -28,15 +28,8 @@ INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})
 ExternalProject_Add(
     extern_gflags
     ${EXTERNAL_PROJECT_LOG_ARGS}
-    # TODO(yiwang): The annoying warnings mentioned in
-    # https://github.com/PaddlePaddle/Paddle/issues/3277 are caused by
-    # gflags. I fired a PR https://github.com/gflags/gflags/pull/230
-    # to fix it. Before it gets accepted by the gflags team, we use
-    # my personal fork, which contains above fix, temporarily. Let's
-    # change this back to the official Github repo once my PR is
-    # merged.
-    GIT_REPOSITORY  "https://github.com/wangkuiyi/gflags.git"
-    GIT_TAG         986964c07427ecb9cdb5bd73f73ebbd40e54dadb
+    GIT_REPOSITORY  "https://github.com/gflags/gflags.git"
+    GIT_TAG         77592648e3f3be87d6c7123eb81cbad75f9aef5a
     PREFIX          ${GFLAGS_SOURCES_DIR}
     UPDATE_COMMAND  ""
     CMAKE_ARGS      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
paddle/operators/math/selected_rows_functor.cu
@@ -227,7 +227,6 @@ template struct SelectedRowsAddToTensor<platform::GPUPlace, float>;
 template struct SelectedRowsAddToTensor<platform::GPUPlace, double>;
 template struct SelectedRowsAddToTensor<platform::GPUPlace, int>;
 template struct SelectedRowsAddToTensor<platform::GPUPlace, int64_t>;
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
python/paddle/v2/fluid/evaluator.py
 import numpy as np
-from paddle.v2.fluid.framework import Program, g_main_program, unique_name, Variable
-import paddle.v2.fluid.core as core
+
+import paddle.v2.fluid.layers as layers
+from paddle.v2.fluid.framework import Program, unique_name, \
+    Variable
+from paddle.v2.fluid.layer_helper import LayerHelper
+
+__all__ = ['Accuracy']


-def _clone_var_in_block_(block, var):
+def _clone_var_(block, var):
     assert isinstance(var, Variable)
     return block.create_var(
         name=var.name,
...
@@ -16,175 +21,115 @@ def _clone_var_in_block_(block, var):
 class Evaluator(object):
     """
-    Evalutor Base class.
-
-    create metric states
-    add mini-batch evaluator caculate operator
-    add increment operator to accumulate the metric states
+    Base Class for all evaluators
+
+    Args:
+        name(str): The name of evaluator. such as, "accuracy". Used for generate
+            temporary variable name.
+        main_program(Program, optional): The evaluator should be added to this
+            main_program. Default g_main_program
+        startup_program(Program, optional):The parameter should be added to this
+            startup_program. Default g_startup_program
+
+    Attributes:
+        states(list): The list of state variables. states will be reset to zero
+            when `reset` is invoked.
+        metrics(list): The list of metrics variables. They will be calculate
+            every mini-batch
     """

     def __init__(self, name, **kwargs):
-        """
-        init the global states
-        """
-        self._states = {}
-        if kwargs.has_key("main_program"):
-            self._main_program = kwargs.get("main_program")
-        else:
-            self._main_program = g_main_program
+        self.states = []
+        self.metrics = []
+        self.helper = LayerHelper(name, **kwargs)

-    def states(self):
-        return self._states
-
-    def _update_ops(self, *args, **kwargs):
+    def reset(self, executor, reset_program=None):
         """
-        append update ops to the global states
+        reset metric states at the begin of each pass/user specified batch
         """
-        raise NotImplementedError()
+        if reset_program is None:
+            reset_program = Program()
+
+        for var in self.states:
+            assert isinstance(var, Variable)
+            g_var = _clone_var_(reset_program.current_block(), var)
+            layers.fill_constant(
+                shape=g_var.shape,
+                value=0.0,
+                dtype=g_var.dtype,
+                out=g_var,
+                main_program=reset_program)
+
+        executor.run(reset_program)

-    def reset(self, executor, reset_program=None):
+    def eval(self, executor, eval_program=None):
         """
-        Clear metric states at the begin of each pass/user specified batch
+        Evaluate the statistics merged by multiple mini-batches.
         """
-        if reset_program == None:
-            reset_program = Program()
-        else:
-            reset_program = program
-        block = reset_program.global_block()
-        for k, var in self._states.iteritems():
-            g_var = _clone_var_in_block_(block, var)
-            zeros = block.create_var(dtype="float32", persistable=True)
-            block.append_op(
-                type="fill_constant",
-                outputs={"Out": [zeros]},
-                attrs={
-                    "shape": g_var.shape,
-                    "value": .0,
-                    "dtype": 5,
-                })
-            block.append_op(
-                type="scale", inputs={"X": zeros}, outputs={"Out": g_var})
-        executor.run(reset_program, fetch_list=self._states.values())
+        raise NotImplementedError()

-    def eval(self, executor, eval_program=None):
+    def create_state(self, suffix, dtype, shape):
         """
-        Merge the mini-batch statistics to form the evaluation result for multiple mini-batches.
-        """
-        raise NotImplementedError()
+        Create state variable.
+
+        NOTE: It is not a public API.
+
+        Args:
+            suffix(str): the state suffix.
+            dtype(str|core.DataType): the state data type
+            shape(tuple|list): the shape of state
+
+        Returns: State variable
+
+        """
+        state = self.helper.create_variable(
+            name="_".join([unique_name(self.helper.name), suffix]),
+            persistable=True,
+            dtype=dtype,
+            shape=shape)
+        self.states.append(state)
+        return state


 class Accuracy(Evaluator):
     """
-    Accuracy need two state variable Total, Correct
+    Average Accuracy for multiple mini-batches.
     """

-    def __init__(self, *args, **kwargs):
+    def __init__(self, input, label, k=1, **kwargs):
         super(Accuracy, self).__init__("accuracy", **kwargs)
-        block = self._main_program.global_block()
-        g_total = block.create_var(
-            name=unique_name("Total"),
-            persistable=True,
-            dtype="int64",
-            shape=[1])
-        g_correct = block.create_var(
-            name=unique_name("Correct"),
-            persistable=True,
-            dtype="int64",
-            shape=[1])
-        self._states["Total"] = g_total
-        self._states["Correct"] = g_correct
-
-    def _update_ops(self, input, label, k=1, **kwargs):
-        block = self._main_program.global_block()
-        topk_out = block.create_var(dtype=input.dtype)
-        topk_indices = block.create_var(dtype="int64")
-        block.append_op(
-            type="top_k",
-            inputs={"X": [input]},
-            outputs={"Out": [topk_out],
-                     "Indices": [topk_indices]},
-            attrs={"k": k})
-        acc_out = block.create_var(dtype=kwargs.get("out_dtype", "float32"))
-        correct = block.create_var(dtype="int64", persistable=True)
-        total = block.create_var(dtype="int64", persistable=True)
-        block.append_op(
-            type="accuracy",
-            inputs={
-                "Out": [topk_out],
-                "Indices": [topk_indices],
-                "Label": [label]
-            },
-            outputs={
-                "Accuracy": [acc_out],
-                "Correct": [correct],
-                "Total": [total],
-            })
-        block.append_op(
-            type="cast",
-            inputs={"X": [self._states["Total"]]},
-            outputs={"Out": [self._states["Total"]]},
-            attrs={
-                "in_dtype": 5,  # float32
-                "out_dtype": 2,  # int32
-            })
-        block.append_op(
-            type="cast",
-            inputs={"X": [self._states["Correct"]]},
-            outputs={"Out": [self._states["Correct"]]},
-            attrs={
-                "in_dtype": 5,
-                "out_dtype": 2,
-            })
-        block.append_op(
-            type="elementwise_add",
-            inputs={"X": [self._states["Total"]],
-                    "Y": [total]},
-            outputs={"Out": [self._states["Total"]]})
-        block.append_op(
-            type="elementwise_add",
-            inputs={"X": [self._states["Correct"]],
-                    "Y": [correct]},
-            outputs={"Out": [self._states["Correct"]]})
-        return acc_out
+        main_program = self.helper.main_program
+        if main_program.current_block().idx != 0:
+            raise ValueError("You can only invoke Evaluator in root block")
+
+        self.total = self.create_state(dtype='int64', shape=[1], suffix='total')
+        self.correct = self.create_state(
+            dtype='int64', shape=[1], suffix='correct')
+        kwargs = {'main_program': main_program}
+        total = self.helper.create_tmp_variable(dtype='int')
+        correct = self.helper.create_tmp_variable(dtype='int')
+        acc = layers.accuracy(
+            input=input,
+            label=label,
+            k=k,
+            total=total,
+            correct=correct,
+            **kwargs)
+        total = layers.cast(x=total, dtype='int64', **kwargs)
+        correct = layers.cast(x=correct, dtype='int64', **kwargs)
+        layers.sums(input=[self.total, total], out=self.total, **kwargs)
+        layers.sums(input=[self.correct, correct], out=self.correct, **kwargs)
+
+        self.metrics.append(acc)

     def eval(self, executor, eval_program=None):
-        if eval_program != None:
-            eval_program = eval_program
-        else:
+        if eval_program is None:
             eval_program = Program()
-        block = eval_program.global_block()
-        eval_out = block.create_var(dtype=self._states["Total"].dtype)
-        e_total = _clone_var_in_block_(block, self._states["Total"])
-        e_correct = _clone_var_in_block_(block, self._states["Correct"])
-        block.append_op(
-            type="cast",
-            inputs={"X": [e_total]},
-            outputs={"Out": [e_total]},
-            attrs={
-                "in_dtype": 2,  # int32
-                "out_dtype": 5,  # float32
-            })
-        block.append_op(
-            type="cast",
-            inputs={"X": [e_correct]},
-            outputs={"Out": [e_correct]},
-            attrs={
-                "in_dtype": 2,
-                "out_dtype": 5,
-            })
-        block.append_op(
-            type="elementwise_div",
-            inputs={"X": e_correct,
-                    "Y": e_total},
-            outputs={"Out": eval_out})
-        out = executor.run(eval_program, fetch_list=[eval_out])
-        return np.array(out[0])
-
-
-def accuracy(*args, **kwargs):
-    cls = Accuracy(*args, **kwargs)
-    out = cls._update_ops(*args, **kwargs)
-    return cls, out
+        block = eval_program.current_block()
+        kwargs = {'main_program': eval_program}
+        total = _clone_var_(block, self.total)
+        correct = _clone_var_(block, self.correct)
+        total = layers.cast(total, dtype='float32', **kwargs)
+        correct = layers.cast(correct, dtype='float32', **kwargs)
+        out = layers.elementwise_div(x=correct, y=total, **kwargs)
+        return np.array(executor.run(eval_program, fetch_list=[out])[0])
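For orientation, the rewritten API is driven from training scripts roughly as in the sketch below. This is assembled from the test-script changes elsewhere in this commit, not a verbatim excerpt; `predict`, `label`, `avg_cost`, `exe`, `tensor_img`, `tensor_y`, and the reader are assumed to be set up as in those scripts.

import paddle.v2.fluid.evaluator as evaluator
import paddle.v2.fluid.framework as framework

# Construct once while building the network: this registers persistent
# state variables (total/correct) plus the per-batch metric ops.
accuracy = evaluator.Accuracy(input=predict, label=label)

for pass_id in range(PASS_NUM):
    accuracy.reset(exe)  # runs a small program that zeroes the states
    for data in train_reader():
        outs = exe.run(framework.default_main_program(),
                       feed={"pixel": tensor_img, "label": tensor_y},
                       fetch_list=[avg_cost] + accuracy.metrics)
        # eval() builds and runs a tiny program computing correct/total
        # over everything accumulated since the last reset().
        pass_acc = accuracy.eval(exe)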
python/paddle/v2/fluid/layers.py
@@ -418,6 +418,7 @@ def _create_op_func_(op_type):
 _create_op_func_('mean')
 _create_op_func_('mul')
 _create_op_func_('elementwise_add')
+_create_op_func_('elementwise_div')
 _create_op_func_('dropout')
 _create_op_func_('reshape')
 _create_op_func_('sigmoid')
...
@@ -457,13 +458,14 @@ def concat(input, axis, main_program=None, startup_program=None):
     return out


-def sums(input, main_program=None, startup_program=None):
+def sums(input, out=None, main_program=None, startup_program=None):
     """
     This function takes in the input and performs the sum operation on it
     and returns that as the output.
     """
     helper = LayerHelper('sum', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    if out is None:
+        out = helper.create_tmp_variable(dtype=helper.input_dtype())
     helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out})
     return out
...
@@ -606,7 +608,7 @@ def square_error_cost(input, label, **kwargs):
     return square_out


-def accuracy(input, label, k=1, **kwargs):
+def accuracy(input, label, k=1, correct=None, total=None, **kwargs):
     """
     This function computes the accuracy using the input and label.
     The output is the top_k inputs and their indices.
...
@@ -620,10 +622,11 @@ def accuracy(input, label, k=1, **kwargs):
         outputs={"Out": [topk_out],
                  "Indices": [topk_indices]},
         attrs={"k": k})
-    acc_out_dtype = kwargs.get("out_dtype", "float32")
     acc_out = helper.create_tmp_variable(dtype="float32")
-    correct = helper.create_tmp_variable(dtype="int64")
-    total = helper.create_tmp_variable(dtype="int64")
+    if correct is None:
+        correct = helper.create_tmp_variable(dtype="int64")
+    if total is None:
+        total = helper.create_tmp_variable(dtype="int64")
     helper.append_op(
         type="accuracy",
         inputs={
...
@@ -1355,6 +1358,19 @@ def lod_rank_table(x, level=0, main_program=None):
     return table


+def topk(input, k, main_program=None, startup_program=None):
+    helper = LayerHelper('topk', **locals())
+    topk_out = helper.create_tmp_variable(dtype=input.data_type)
+    topk_indices = helper.create_tmp_variable(dtype='int64')
+    helper.append_op(
+        type='top_k',
+        inputs={'X': [input]},
+        outputs={'Out': [topk_out],
+                 'Indices': [topk_indices]},
+        attrs={'k': k})
+    return topk_out, topk_indices
+
+
 def lod_tensor_to_array(x, table, main_program=None):
     """
     This function creates an operator to convert an LOD_Tensor to
...
@@ -1388,14 +1404,20 @@ def array_to_lod_tensor(x, table, main_program=None):
     return tmp


-def fill_constant(shape, dtype, value, main_program=None, startup_program=None):
+def fill_constant(shape,
+                  dtype,
+                  value,
+                  out=None,
+                  main_program=None,
+                  startup_program=None):
     """
     This function creates a tensor , with shape as mentioned in the input and
     specified dtype and fills this up with a constant value that
     comes in the input. It also sets the stop_gradient to be True.
     """
     helper = LayerHelper("fill_constant", **locals())
-    out = helper.create_tmp_variable(dtype=dtype)
+    if out is None:
+        out = helper.create_tmp_variable(dtype=dtype)
     helper.append_op(
         type='fill_constant',
         inputs={},
...
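The `out=` parameters added to `sums` and `fill_constant` above are what the rewritten evaluator relies on: they let a layer write into an existing persistent variable instead of always allocating a fresh temporary. A minimal sketch of the pattern follows; `state`, `batch_total`, `prog`, and `reset_prog` are illustrative names, not from the commit.

import paddle.v2.fluid.layers as layers

# Accumulate in place: the sum op writes back into `state` because it is
# passed as out=, so the running total survives across mini-batches.
layers.sums(input=[state, batch_total], out=state, main_program=prog)

# Reset the same state to zero at the start of a pass: fill_constant now
# also accepts out=, so no new variable is created in reset_prog.
layers.fill_constant(shape=state.shape, dtype=state.dtype, value=0.0,
                     out=state, main_program=reset_prog)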
python/paddle/v2/fluid/tests/book/test_image_classification_train.py
@@ -5,7 +5,6 @@ import paddle.v2.fluid.framework as framework
 import paddle.v2.fluid.layers as layers
 import paddle.v2.fluid.nets as nets
 import paddle.v2.fluid.evaluator as evaluator
-from paddle.v2.fluid.io import get_inference_program
 from paddle.v2.fluid.executor import Executor
 from paddle.v2.fluid.initializer import XavierInitializer
 from paddle.v2.fluid.optimizer import AdamOptimizer
...
@@ -110,18 +109,16 @@ avg_cost = layers.mean(x=cost)
 optimizer = AdamOptimizer(learning_rate=0.001)
 opts = optimizer.minimize(avg_cost)
-accuracy, acc_out = evaluator.accuracy(input=predict, label=label)
+accuracy = evaluator.Accuracy(input=predict, label=label)

 BATCH_SIZE = 128
 PASS_NUM = 1

 train_reader = paddle.batch(
     paddle.reader.shuffle(
-        paddle.dataset.cifar.train10(), buf_size=BATCH_SIZE * 10),
+        paddle.dataset.cifar.train10(), buf_size=128 * 10),
     batch_size=BATCH_SIZE)

-test_reader = paddle.batch(paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)
-
 place = core.CPUPlace()
 exe = Executor(place)
...
@@ -147,46 +144,15 @@ for pass_id in range(PASS_NUM):
         outs = exe.run(framework.default_main_program(),
                        feed={"pixel": tensor_img,
                              "label": tensor_y},
-                       fetch_list=[avg_cost, acc_out])
+                       fetch_list=[avg_cost] + accuracy.metrics)
         loss = np.array(outs[0])
         acc = np.array(outs[1])
         pass_acc = accuracy.eval(exe)
-        batch_id = batch_id + 1
-
-        test_accuracy, test_acc_out = evaluator.accuracy(
-            input=predict, label=label)
-        test_target = [avg_cost, test_acc_out] + test_accuracy.states().values()
-        inference_program = get_inference_program(test_target)
-
-        test_accuracy.reset(exe)
-        for data in test_reader():
-            x_data = np.array(map(lambda x: x[0].reshape(data_shape),
-                                  data)).astype("float32")
-            y_data = np.array(map(lambda x: x[1], data)).astype("int64")
-            y_data = np.expand_dims(y_data, axis=1)
-
-            tensor_x = core.LoDTensor()
-            tensor_x.set(x_data, place)
-
-            tensor_y = core.LoDTensor()
-            tensor_y.set(y_data, place)
-
-            outs = exe.run(inference_program,
-                           feed={'pixel': tensor_x,
-                                 'label': tensor_y},
-                           fetch_list=[avg_cost, test_acc_out])
-            out = np.array(outs[0])
-            acc = np.array(outs[1])
-            test_pass_acc = test_accuracy.eval(exe)
-
-            print("pass_id:" + str(pass_id) + " batch_id:" + str(batch_id) +
-                  " loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" +
-                  str(pass_acc) + " test_pass_acc:" + str(test_pass_acc))
+        print("pass_id:" + str(pass_id) + " batch_id:" + str(batch_id) +
+              " loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" +
+              str(pass_acc))
+        batch_id = batch_id + 1

         if batch_id > 1:
             # this model is slow, so if we can train two mini batch, we think it works properly.
...
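Every test script in this commit migrates the same way: the old module-level `evaluator.accuracy(...)` helper returned an `(evaluator, metric_var)` pair, while the new `evaluator.Accuracy` object exposes its metric variables through `.metrics`. Schematically (`program` and `feed` stand in for the script-specific values):

# Old API: helper function returns the evaluator and its output variable.
accuracy, acc_out = evaluator.accuracy(input=predict, label=label)
outs = exe.run(program, feed=feed, fetch_list=[avg_cost, acc_out])

# New API: class instance; per-batch metric vars live in accuracy.metrics,
# and the accumulated result is computed on demand.
accuracy = evaluator.Accuracy(input=predict, label=label)
outs = exe.run(program, feed=feed, fetch_list=[avg_cost] + accuracy.metrics)
pass_acc = accuracy.eval(exe)  # accuracy accumulated across batches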
python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py
@@ -31,7 +31,7 @@ avg_cost = layers.mean(x=cost)
 optimizer = AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
 opts = optimizer.minimize(avg_cost)
-accuracy, acc_out = evaluator.accuracy(input=predict, label=label)
+accuracy = evaluator.Accuracy(input=predict, label=label)

 BATCH_SIZE = 50
 PASS_NUM = 3
...
@@ -61,7 +61,7 @@ for pass_id in range(PASS_NUM):
         outs = exe.run(framework.default_main_program(),
                        feed={"pixel": tensor_img,
                              "label": tensor_y},
-                       fetch_list=[avg_cost, acc_out])
+                       fetch_list=[avg_cost] + accuracy.metrics)
         loss = np.array(outs[0])
         acc = np.array(outs[1])
         pass_acc = accuracy.eval(exe)
...
python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
@@ -36,7 +36,7 @@ avg_cost = layers.mean(x=cost)
 optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
 opts = optimizer.minimize(avg_cost)
-accuracy, acc_out = evaluator.accuracy(input=predict, label=label)
+accuracy = evaluator.Accuracy(input=predict, label=label)

 train_reader = paddle.batch(
     paddle.reader.shuffle(
...
@@ -67,15 +67,14 @@ for pass_id in range(PASS_NUM):
         outs = exe.run(framework.default_main_program(),
                        feed={'x': tensor_x,
                              'y': tensor_y},
-                       fetch_list=[avg_cost, acc_out])
+                       fetch_list=[avg_cost] + accuracy.metrics)
         out = np.array(outs[0])
         acc = np.array(outs[1])
         pass_acc = accuracy.eval(exe)

-        test_accuracy, test_acc_out = evaluator.accuracy(
-            input=predict, label=label)
-        test_target = [avg_cost, test_acc_out] + test_accuracy.states().values()
+        test_accuracy = evaluator.Accuracy(input=predict, label=label)
+        test_target = [avg_cost] + test_accuracy.metrics + test_accuracy.states
         inference_program = get_inference_program(test_target)

         test_accuracy.reset(exe)
...
@@ -93,7 +92,7 @@ for pass_id in range(PASS_NUM):
             outs = exe.run(inference_program,
                            feed={'x': tensor_x,
                                  'y': tensor_y},
-                           fetch_list=[avg_cost, test_acc_out])
+                           fetch_list=[avg_cost] + test_accuracy.metrics)
             out = np.array(outs[0])
             acc = np.array(outs[1])
...
python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py
@@ -32,9 +32,9 @@ def convolution_net(input_dim, class_dim=2, emb_dim=32, hid_dim=32):
     cost = layers.cross_entropy(input=prediction, label=label)
     avg_cost = layers.mean(x=cost)
     adam_optimizer = AdamOptimizer(learning_rate=0.002)
-    opts = adam_optimizer.minimize(avg_cost)
-    accuracy, acc_out = evaluator.accuracy(input=prediction, label=label)
-    return avg_cost, accuracy, acc_out
+    adam_optimizer.minimize(avg_cost)
+    accuracy = evaluator.Accuracy(input=prediction, label=label)
+    return avg_cost, accuracy, accuracy.metrics[0]


 def to_lodtensor(data, place):
...
python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py
@@ -41,9 +41,9 @@ def stacked_lstm_net(input_dim,
     cost = layers.cross_entropy(input=prediction, label=label)
     avg_cost = layers.mean(x=cost)
     adam_optimizer = AdamOptimizer(learning_rate=0.002)
-    opts = adam_optimizer.minimize(avg_cost)
-    accuracy, acc_out = evaluator.accuracy(input=prediction, label=label)
-    return avg_cost, accuracy, acc_out
+    adam_optimizer.minimize(avg_cost)
+    accuracy = evaluator.Accuracy(input=prediction, label=label)
+    return avg_cost, accuracy, accuracy.metrics[0]


 def to_lodtensor(data, place):
...