BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit b8f557f2
Authored on Nov 09, 2017 by Dong Zhihong

    "add elementwise_add more type"

Parent: e34e1293

Showing 7 changed files with 194 additions and 58 deletions (+194, -58)
    paddle/operators/accuracy_op.h                                     +2    -2
    paddle/operators/elementwise_add_op.cc                             +8    -2
    python/paddle/v2/framework/evaluator.py                            +151  -39
    python/paddle/v2/framework/framework.py                            +1    -1
    python/paddle/v2/framework/layers.py                               +8    -2
    python/paddle/v2/framework/tests/test_accuracy_op.py               +2    -2
    python/paddle/v2/framework/tests/test_recognize_digits_conv.py     +22   -10
paddle/operators/accuracy_op.h

@@ -45,9 +45,9 @@ class AccuracyKernel : public framework::OpKernel<T> {
     auto* correct = ctx.Output<Tensor>("Correct");
     auto* total = ctx.Output<Tensor>("Total");
-    float* correct_data = correct->mutable_data<float>(ctx.GetPlace());
-    int* accuracy_data = accuracy->mutable_data<int>(ctx.GetPlace());
+    int* correct_data = correct->mutable_data<int>(ctx.GetPlace());
     int* total_data = total->mutable_data<int>(ctx.GetPlace());
+    float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());
     const int64_t* indices_data = indices->data<int64_t>();
     const int64_t* label_data = label->data<int64_t>();
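The swap above keeps the Correct and Total counters integral and the Accuracy output floating point. As a rough illustration of how the three buffers relate (a NumPy stand-in, not the Paddle kernel; variable names are only chosen to mirror the pointers in the diff):

    # Illustrative NumPy sketch only, not the accuracy_op kernel.
    import numpy as np

    indices = np.array([[1], [0], [2]], dtype=np.int64)  # top-k predicted ids
    label = np.array([[1], [2], [2]], dtype=np.int64)    # ground-truth ids

    num_correct = int((indices == label).any(axis=1).sum())  # -> correct_data (int)
    num_total = indices.shape[0]                             # -> total_data (int)
    acc = num_correct / float(num_total)                     # -> accuracy_data (float)

    assert (num_correct, num_total) == (2, 3)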
paddle/operators/elementwise_add_op.cc

@@ -34,7 +34,13 @@ REGISTER_OP(elementwise_add, ops::ElementwiseOp, ops::ElementwiseAddOpMaker,
             elementwise_add_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_add,
-    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_add_grad,
-    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, int64_t>);
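The CPU registration now lists one kernel per element type instead of float only. A minimal sketch of the semantics, in NumPy terms rather than the Paddle API: elementwise_add stays a plain element-by-element sum, but after this change it can be dispatched for float, double, int and int64 inputs (and their gradients).

    # Illustrative NumPy analogue only, not Paddle code.
    import numpy as np

    for dtype in (np.float32, np.float64, np.int32, np.int64):
        x = np.array([1, 2, 3], dtype=dtype)
        y = np.array([10, 20, 30], dtype=dtype)
        out = x + y
        assert out.dtype == dtype
        assert (out == np.array([11, 22, 33], dtype=dtype)).all()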
python/paddle/v2/framework/evaluator.py

-from paddle.v2.framework.framework import Program, g_main_program, unique_name
-from paddle.v2.framework.layer_helper import LayerHelper
+from paddle.v2.framework.framework import Program, g_main_program, unique_name, Variable
+import paddle.v2.framework.core as core
+
+
+def _clone_var_in_block_(block, var):
+    assert isinstance(var, Variable)
+    return block.create_var(
+        name=var.name,
+        shape=var.shape,
+        dtype=var.data_type,
+        type=var.type,
+        lod_level=var.lod_level,
+        persistable=True)


 class Evaluator(object):
     """
     Evalutor Base class.

@@ -13,33 +13,49 @@ class Evaluator(object):
     """

     def __init__(self, name, **kwargs):
         """
         init the global states
         """
         self._states = {}
-        if kwargs.has_key("program"):
-            self._program = kwargs.get("program")
-        else:
-            self._program = g_main_program
+        if kwargs.has_key("main_program"):
+            self._main_program = kwargs.get("main_program")
+        else:
+            self._main_program = g_main_program
+        if kwargs.has_key("eval_program"):
+            self._eval_program = kwargs.get("eval_program")
+        else:
+            self._eval_program = Program()

     def _update_ops(self):
         """
         append update ops to the global states
         """
         raise NotImplementedError()

     def reset(self, executor, program=None):
         """
         Clear metric states at the begin of each pass/user specified batch
         """
         if program == None:
             reset_program = Program()
         else:
             reset_program = program
         block = reset_program.global_block()
         for k, var in self._states.iteritems():
-            zeros = block.create_var(dtype=var.data_type)
+            g_var = _clone_var_in_block_(block, var)
+            zeros = block.create_var(dtype="float32", persistable=True)
             block.append_op(
                 type="fill_constant",
                 outputs={"Out": [zeros]},
                 attrs={
-                    "shape": var.shape,
-                    "value": 0,
+                    "shape": g_var.shape,
+                    "value": .0,
+                    "data_type": 5,
                 })
             block.append_op(
-                type="scale", inputs={"X": zeros}, outputs={"Out": var})
-        executor.run(reset_program)
+                type="scale", inputs={"X": zeros}, outputs={"Out": g_var})
+        print reset_program
+        executor.run(reset_program, fetch_list=self._states.values())

     def eval(self, executor, program=None):
         """

@@ -53,15 +79,16 @@ class Accuracy(Evaluator):
     Accuracy need two state variable Total, Correct
     """

-    def __init__(self, input, label, k=1, **kwargs):
+    def __init__(self, *args, **kwargs):
         super(Accuracy, self).__init__("accuracy", **kwargs)
-        block = self._program.global_block()
+        # block = self._eval_program.global_block()
+        block = self._main_program.global_block()
         g_total = block.create_var(
             name=unique_name("Total"),
             persistable=True,
             dtype="int64",
             shape=[1])
-        g_correct = helper.create_global_variable(
+        g_correct = block.create_var(
             name=unique_name("Correct"),
             persistable=True,
             dtype="int64",

@@ -69,6 +96,8 @@ class Accuracy(Evaluator):
         self._states["Total"] = g_total
         self._states["Correct"] = g_correct

+    def _update_ops(self, input, label, k=1, **kwargs):
+        block = self._main_program.global_block()
         topk_out = block.create_var(dtype=input.data_type)
         topk_indices = block.create_var(dtype="int64")
         block.append_op(

@@ -77,8 +106,9 @@ class Accuracy(Evaluator):
             outputs={"Out": [topk_out],
                      "Indices": [topk_indices]},
             attrs={"k": k})
-        acc_out_dtype = kwargs.get("out_dtype", "float32")
-        acc_out = block.create_var(dtype=acc_out_dtype)
+        acc_out = block.create_var(dtype=kwargs.get("out_dtype", "float32"))
+        correct = block.create_var(dtype="int64", persistable=True)
+        total = block.create_var(dtype="int64", persistable=True)
         block.append_op(
             type="accuracy",
             inputs={

@@ -92,39 +122,121 @@ class Accuracy(Evaluator):
                 "Total": [total],
             })
+        # block = self._eval_program.global_block()
+        # e_correct = _clone_var_in_block_(block, correct)
+        # e_total = _clone_var_in_block_(block, total)
+        # block.append_op(
+        #     type="sum",
+        #     inputs={"X": [self._states["Total"], total]},
+        #     outputs={"Out": [self._states["Total"]]})
+        block.append_op(
+            type="cast",
+            inputs={"X": [self._states["Total"]]},
+            outputs={"Out": [self._states["Total"]]},
+            attrs={
+                "in_data_type": 5,
+                "out_data_type": 2,
+            })
+        block.append_op(
+            type="cast",
+            inputs={"X": [self._states["Correct"]]},
+            outputs={"Out": [self._states["Correct"]]},
+            attrs={
+                "in_data_type": 5,
+                "out_data_type": 2,
+            })
         block.append_op(
-            type="sum",
-            inputs={"X": [g_total, total]},
-            outputs={"Out": [g_total]})
+            type="elementwise_add",
+            inputs={"X": [self._states["Total"]],
+                    "Y": [total]},
+            outputs={"Out": [self._states["Total"]]})
         block.append_op(
-            type="sum",
-            inputs={"X": [g_correct, correct]},
-            outputs={"Out": [g_total]})
+            type="elementwise_add",
+            inputs={"X": [self._states["Correct"]],
+                    "Y": [correct]},
+            outputs={"Out": [self._states["Correct"]]})
+        # g_total = self._states["Total"]
+        # print g_total
+        # print total
+        # print "*" * 100
+        # print g_total.block.program == total.block.program
+        # g_total = _clone_var_in_block_(block, self._states["Total"])
+        # e_total = _clone_var_in_block_(block, total)
+        # block.append_op(
+        #     type="sum",
+        #     inputs={"X": [g_total, e_total]},
+        #     outputs={"Out": [g_total]})
+        # block.append_op(
+        #     type="sum",
+        #     inputs={"X": [self._states["Correct"], correct]},
+        #     outputs={"Out": [self._states["Correct"]]})
+        # print self._main_program
         return acc_out

-    def eval(self, executor, program=None):
-        if program == None:
-            eval_program = Program()
-        else:
-            eval_program = program
-        block = eval_program.global_block()
-        eval_out = block.create_var(dtype=self._helper.input_dtype())
+    def eval(self, executor):
+        block = self._eval_program.global_block()
+        eval_out = block.create_var(dtype=self._states["Total"].data_type)
+        e_correct = _clone_var_in_block_(block, correct)
+        e_total = _clone_var_in_block_(block, total)
+        # block.append_op(
+        #     type="elementwise_div",
+        #     inputs={"X": self._states["Total"],
+        #             "Y": self._states["Correct"]},
+        #     outputs={"Out": eval_out})
         block.append_op(
             type="elementwise_div",
-            inputs={"X": self._states["Total"],
-                    "Y": self._states["Correct"]},
+            inputs={"X": e_total,
+                    "Y": e_correct},
             outputs={"Out": eval_out})
-        return executor.run(eval_program, fetch_list=[eval_out])
+        return executor.run(self._eval_program, fetch_list=[eval_out])


-# Demo for composing low level op to compute the F1 metric
-class F1(Evaluator):
-    def __init__(self, input, label, **kwargs):
-        super(F1, self).__init__("F1", **kwargs)
-        g_tp = helper.create_global_variable(
+# Demo for composing low level ops to compute the F1 metric
+class FScore(Evaluator):
+    def __init__(self, input, label, beta=1.0, **kwargs):
+        super(F1, self).__init__("FScore", **kwargs)
+        block = self._program.global_block()
+        g_tp = block.create_var(
             name=unique_name("Tp"), persistable=True, dtype="int64", shape=[1])
-        g_fp = helper.create_global_variable(
+        g_fn = block.create_var(
+            name=unique_name("Fn"), persistable=True, dtype="int64", shape=[1])
+        g_fp = block.create_var(
             name=unique_name("Fp"), persistable=True, dtype="int64", shape=[1])

         self._states["Tp"] = g_tp
         self._states["Fp"] = g_fp
         self._states["Fn"] = g_fn

     def _update_ops(self):
         block = self._program.global_block()
         equal_out = block.create_var()
         block.append_op(
             type="equal",
             inputs={"X": [input], "Y": [label]},
             outputs={"Out": equal_out})

         positive = block.create_var()
         block.append_op(
             type="sequence_pool",
             inputs={"X": [equal_out]},
             outputs={"Out": positive},
             attrs={"pooltype": "SUM"})
         batch = block.create_var(
             name=feed_var_name,
             type=core.VarDesc.VarType.FEED_MINIBATCH,
             persistable=True)


 # def register():
 accuracy = Accuracy
 # def accuracy(*args, **kwargs):
 #     acc = Accuracy(**kwargs)
 #     return acc._update_ops(*args, **kwargs)
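The evaluator keeps persistable Total and Correct variables in the main program, zeroes them in reset() with a fill_constant op, accumulates the per-batch counters with the newly multi-type elementwise_add op, and combines them in eval(). A plain-Python sketch of that bookkeeping (a hypothetical class, not part of the Paddle API) to make the intended data flow explicit:

    # reset() mirrors the fill_constant op, update() the elementwise_add ops,
    # and result() the final elementwise_div; all names here are illustrative.
    class AccuracyStateSketch(object):
        def __init__(self):
            self.total = 0    # plays the role of self._states["Total"]
            self.correct = 0  # plays the role of self._states["Correct"]

        def reset(self):
            self.total = 0
            self.correct = 0

        def update(self, batch_correct, batch_total):
            self.correct += batch_correct
            self.total += batch_total

        def result(self):
            return float(self.correct) / float(self.total)

    state = AccuracyStateSketch()
    state.reset()
    state.update(batch_correct=40, batch_total=50)
    state.update(batch_correct=45, batch_total=50)
    assert abs(state.result() - 0.85) < 1e-6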
python/paddle/v2/framework/framework.py

@@ -550,7 +550,7 @@ class Parameter(Variable):
             raise ValueError("Parameter shape should not be related with "
                              "batch-size")

-        super(Parameter, self).__init__(
+        Variable.__init__(
             self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)
         self.trainable = kwargs.get('trainable', True)
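The single changed line replaces the bound super(Parameter, self).__init__(...) call with an unbound Variable.__init__(...) call; the unchanged continuation line already passes self explicitly, which only the unbound form expects. A minimal sketch of that difference, with hypothetical stand-in classes rather than the real Variable and Parameter:

    # Hypothetical stand-ins, only to illustrate the call style in the diff.
    class VariableSketch(object):
        def __init__(self, block, persistable=False, **kwargs):
            self.block = block
            self.persistable = persistable

    class ParameterSketch(VariableSketch):
        def __init__(self, block, shape, dtype, **kwargs):
            # unbound base-class call: `self` is passed explicitly
            VariableSketch.__init__(
                self, block, persistable=True, **kwargs)
            self.shape = shape
            self.dtype = dtype

    p = ParameterSketch("global_block", shape=[784, 10], dtype="float32")
    assert p.persistable and p.block == "global_block"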
python/paddle/v2/framework/layers.py

@@ -263,7 +263,9 @@ def accuracy(input, label, k=1, **kwargs):
                  "Indices": [topk_indices]},
         attrs={"k": k})
     acc_out_dtype = kwargs.get("out_dtype", "float32")
-    acc_out = helper.create_tmp_variable(dtype=acc_out_dtype)
+    acc_out = helper.create_tmp_variable(dtype="float32")
+    correct = helper.create_tmp_variable(dtype="int64")
+    total = helper.create_tmp_variable(dtype="int64")
     helper.append_op(
         type="accuracy",
         inputs={

@@ -271,7 +273,11 @@ def accuracy(input, label, k=1, **kwargs):
             "Indices": [topk_indices],
             "Label": [label]
         },
-        outputs={"Accuracy": [acc_out]})
+        outputs={
+            "Accuracy": [acc_out],
+            "Correct": [correct],
+            "Total": [total],
+        })
     return acc_out
python/paddle/v2/framework/tests/test_accuracy_op.py

@@ -19,7 +19,8 @@ class TestAccuracyOp(OpTest):
                 break
         self.outputs = {
             'Accuracy': np.array([num_correct / float(n)]).astype("float32"),
-            'Correct': np.array([num_correct]).astype("int32")
+            'Correct': np.array([num_correct]).astype("int32"),
+            'Total': np.array([n]).astype("int32")
         }

     def test_check_output(self):

@@ -27,5 +28,4 @@ class TestAccuracyOp(OpTest):

 if __name__ == '__main__':
-    exit(0)
     unittest.main()
python/paddle/v2/framework/tests/test_recognize_digits_conv.py

@@ -3,6 +3,7 @@ import paddle.v2.framework.layers as layers
 import paddle.v2.framework.nets as nets
 import paddle.v2.framework.core as core
 import paddle.v2.framework.optimizer as optimizer
+import paddle.v2.framework.evaluator as evaluator
 from paddle.v2.framework.framework import Program, g_main_program
 from paddle.v2.framework.executor import Executor

@@ -54,17 +55,24 @@ cost = layers.cross_entropy(
                             main_program=main_program,
                             startup_program=startup_program)
 avg_cost = layers.mean(x=cost, main_program=main_program)
-accuracy = layers.accuracy(
-    input=predict,
-    label=label,
-    main_program=main_program,
-    startup_program=startup_program)
+# accuracy = layers.accuracy(
+#     input=predict,
+#     label=label,
+#     main_program=main_program,
+#     startup_program=startup_program)

 # optimizer = optimizer.MomentumOptimizer(learning_rate=0.1 / 128.0,
 #                                         momentum=0.9)
 optimizer = optimizer.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
 opts = optimizer.minimize(avg_cost, startup_program)

+accuracy = evaluator.accuracy(
+    input=predict,
+    label=label,
+    main_program=main_program,
+    startup_program=startup_program)
+acc_out = accuracy._update_ops(
+    input=predict, label=label, main_program=main_program)

 BATCH_SIZE = 50
 PASS_NUM = 3
 train_reader = paddle.batch(

@@ -79,6 +87,7 @@ exe.run(startup_program, feed={}, fetch_list=[])
 for pass_id in range(PASS_NUM):
     count = 0
+    accuracy.reset(exe)
     for data in train_reader():
         img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]),
                                 data)).astype("float32")

@@ -93,11 +102,14 @@ for pass_id in range(PASS_NUM):
         outs = exe.run(main_program,
                        feed={"pixel": tensor_img,
                              "label": tensor_y},
-                       fetch_list=[avg_cost, accuracy])
+                       fetch_list=[avg_cost, acc_out])
         loss = np.array(outs[0])
         acc = np.array(outs[1])
+        # pass_acc = accuracy.eval(exe)
+        # print pass_acc
+        print loss, acc

-        if loss < 10.0 and acc > 0.9:
-            # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
-            exit(0)
+        # if loss < 10.0 and acc > 0.9:
+        #     # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
+        #     exit(0)
 exit(1)