Commit 46c61b35: "add elementwise op support"
Repository: 机器未来 / Paddle (fork of PaddlePaddle / Paddle)
Author: Dong Zhihong
Date: Nov 09, 2017
Parent: b8f557f2
Showing 5 changed files with 71 additions and 78 deletions (+71 -78)
paddle/operators/elementwise_div_op.cc  +8 -2
paddle/operators/elementwise_mul_op.cc  +6 -2
paddle/operators/elementwise_sub_op.cc  +8 -2
python/paddle/v2/framework/evaluator.py  +39 -56
python/paddle/v2/framework/tests/test_recognize_digits_conv.py  +10 -16
paddle/operators/elementwise_div_op.cc

@@ -35,7 +35,13 @@ REGISTER_OP(elementwise_div, ops::ElementwiseOp, ops::ElementwiseDivOpMaker,
             elementwise_div_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_div,
-    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_div_grad,
-    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, int64_t>);
paddle/operators/elementwise_mul_op.cc

@@ -37,8 +37,12 @@ REGISTER_OP(elementwise_mul, ops::ElementwiseOp, ops::ElementwiseMulOpMaker,
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul,
     ops::ElementwiseMulKernel<paddle::platform::CPUPlace, float>,
-    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, double>);
+    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseMulKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul_grad,
     ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, float>,
-    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, double>);
+    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseMulGradKernel<paddle::platform::CPUPlace, int64_t>);
paddle/operators/elementwise_sub_op.cc

@@ -34,7 +34,13 @@ REGISTER_OP(elementwise_sub, ops::ElementwiseOp, ops::ElementwiseSubOpMaker,
             elementwise_sub_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_sub,
-    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     elementwise_sub_grad,
-    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, float>);
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, float>,
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, double>,
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, int>,
+    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, int64_t>);
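
Note on the three C++ hunks above: they all follow one pattern. Each REGISTER_OP_CPU_KERNEL call previously instantiated the element-wise kernel for float only (elementwise_mul also had double); this commit adds double, int, and int64_t instantiations, so the div/mul/sub ops and their gradients also accept integer tensors on CPU, which is what the integer counters in the Python evaluator below rely on. A minimal NumPy sketch, for illustration only (NumPy stands in for the CPU kernels and is not the Paddle code path), of the dtype coverage being added:

import numpy as np

# Illustration only: element-wise arithmetic across the four dtypes that the
# CPU kernels above now register (float, double, int, int64_t).
for dtype in (np.float32, np.float64, np.int32, np.int64):
    x = np.array([6, 8, 10], dtype=dtype)
    y = np.array([2, 4, 5], dtype=dtype)
    print(np.dtype(dtype).name, x * y, x - y, x / y)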
python/paddle/v2/framework/evaluator.py

+import numpy as np
 from paddle.v2.framework.framework import Program, g_main_program, unique_name, Variable
 import paddle.v2.framework.core as core

@@ -31,12 +32,8 @@ class Evaluator(object):
             self._main_program = kwargs.get("main_program")
         else:
             self._main_program = g_main_program
-        if kwargs.has_key("eval_program"):
-            self._eval_program = kwargs.get("eval_program")
-        else:
-            self._eval_program = Program()

-    def _update_ops(self):
+    def _update_ops(self, *args, **kwargs):
         """
         append update ops to the global states
         """

@@ -64,13 +61,12 @@ class Evaluator(object):
             })
             block.append_op(
                 type="scale", inputs={"X": zeros}, outputs={"Out": g_var})
-        print reset_program
         executor.run(reset_program, fetch_list=self._states.values())

     def eval(self, executor, program=None):
         """
         Merge the mini-batch statistics to form the evaluation result for multiple mini-batches.
         """
         raise NotImplementedError()

@@ -81,7 +77,6 @@ class Accuracy(Evaluator):
     def __init__(self, *args, **kwargs):
         super(Accuracy, self).__init__("accuracy", **kwargs)
-        # block = self._eval_program.global_block()
         block = self._main_program.global_block()
         g_total = block.create_var(
             name=unique_name("Total"),

@@ -122,21 +117,13 @@ class Accuracy(Evaluator):
                 "Total": [total],
             })
-        # block = self._eval_program.global_block()
-        # e_correct = _clone_var_in_block_(block, correct)
-        # e_total = _clone_var_in_block_(block, total)
-        # block.append_op(
-        #     type="sum",
-        #     inputs={"X": [self._states["Total"], total]},
-        #     outputs={"Out": [self._states["Total"]]})
         block.append_op(
             type="cast",
             inputs={"X": [self._states["Total"]]},
             outputs={"Out": [self._states["Total"]]},
             attrs={
-                "in_data_type": 5,
-                "out_data_type": 2,
+                "in_data_type": 5,  # float32
+                "out_data_type": 2,  #int32
             })
         block.append_op(
             type="cast",

@@ -158,44 +145,40 @@ class Accuracy(Evaluator):
                     "Y": [correct]},
             outputs={"Out": [self._states["Correct"]]})
-        # g_total = self._states["Total"]
-        # print g_total
-        # print total
-        # print "*" * 100
-        # print g_total.block.program == total.block.program
-        # g_total = _clone_var_in_block_(block, self._states["Total"])
-        # e_total = _clone_var_in_block_(block, total)
-        # block.append_op(
-        #     type="sum",
-        #     inputs={"X": [g_total, e_total]},
-        #     outputs={"Out": [g_total]})
-        # block.append_op(
-        #     type="sum",
-        #     inputs={"X": [self._states["Correct"], correct]},
-        #     outputs={"Out": [self._states["Correct"]]})
-        # print self._main_program
         return acc_out

-    def eval(self, executor):
-        block = self._eval_program.global_block()
+    def eval(self, executor, program=None):
+        if program != None:
+            eval_program = program
+        else:
+            eval_program = Program()
+        block = eval_program.global_block()
         eval_out = block.create_var(dtype=self._states["Total"].data_type)
-        e_correct = _clone_var_in_block_(block, correct)
-        e_total = _clone_var_in_block_(block, total)
-        # block.append_op(
-        #     type="elementwise_div",
-        #     inputs={"X": self._states["Total"],
-        #             "Y": self._states["Correct"]},
-        #     outputs={"Out": eval_out})
+        e_total = _clone_var_in_block_(block, self._states["Total"])
+        e_correct = _clone_var_in_block_(block, self._states["Correct"])
+        block.append_op(
+            type="cast",
+            inputs={"X": [e_total]},
+            outputs={"Out": [e_total]},
+            attrs={
+                "in_data_type": 2,  #int32
+                "out_data_type": 5,  #float32
+            })
+        block.append_op(
+            type="cast",
+            inputs={"X": [e_correct]},
+            outputs={"Out": [e_correct]},
+            attrs={
+                "in_data_type": 2,
+                "out_data_type": 5,
+            })
         block.append_op(
             type="elementwise_div",
-            inputs={"X": e_total,
-                    "Y": e_correct},
+            inputs={"X": e_correct,
+                    "Y": e_total},
             outputs={"Out": eval_out})
-        return executor.run(self._eval_program, fetch_list=[eval_out])
+        out = executor.run(eval_program, fetch_list=[eval_out])
+        return np.array(out[0])

@@ -235,8 +218,8 @@ class FScore(Evaluator):
             persistable=True)

-# def register():
-accuracy = Accuracy
-# def accuracy(*args, **kwargs):
-#     acc = Accuracy(**kwargs)
-#     return acc._update_ops(*args, **kwargs)
+# FIXME(dzh): add a decorator to call _update_ops automatically
+def accuracy(*args, **kwargs):
+    cls = Accuracy(*args, **kwargs)
+    out = cls._update_ops(*args, **kwargs)
+    return cls, out
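
To summarize the evaluator.py hunks: the reworked Accuracy.eval no longer keeps a private _eval_program; it builds a fresh (or caller-supplied) program, clones the accumulated Total and Correct state variables into it, casts them from int32 (data type 2) to float32 (data type 5), divides correct by total with elementwise_div, and returns the fetched result as a NumPy array. A plain NumPy sketch of the equivalent arithmetic, with hypothetical counter values (this mirrors the math only, not the Paddle graph API):

import numpy as np

# NumPy equivalent of the cast + elementwise_div graph composed in Accuracy.eval.
total = np.array([500], dtype=np.int32)    # hypothetical accumulated "Total" state
correct = np.array([421], dtype=np.int32)  # hypothetical accumulated "Correct" state

e_total = total.astype(np.float32)      # cast: in_data_type 2 (int32) -> 5 (float32)
e_correct = correct.astype(np.float32)  # cast: in_data_type 2 (int32) -> 5 (float32)
eval_out = e_correct / e_total          # elementwise_div with X=e_correct, Y=e_total
print(eval_out)                         # -> [0.842]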
python/paddle/v2/framework/tests/test_recognize_digits_conv.py

@@ -55,23 +55,14 @@ cost = layers.cross_entropy(
     main_program=main_program,
     startup_program=startup_program)
 avg_cost = layers.mean(x=cost, main_program=main_program)
-# accuracy = layers.accuracy(
-#     input=predict,
-#     label=label,
-#     main_program=main_program,
-#     startup_program=startup_program)
-# optimizer = optimizer.MomentumOptimizer(learning_rate=0.1 / 128.0,
-#                                          momentum=0.9)
 optimizer = optimizer.AdamOptimizer(learning_rate=0.01, beta1=0.9, beta2=0.999)
 opts = optimizer.minimize(avg_cost, startup_program)

-accuracy = evaluator.accuracy(
+accuracy, acc_out = evaluator.accuracy(
     input=predict,
     label=label,
     main_program=main_program,
     startup_program=startup_program)
-acc_out = accuracy._update_ops(
-    input=predict, label=label, main_program=main_program)

 BATCH_SIZE = 50
 PASS_NUM = 3

@@ -105,11 +96,14 @@ for pass_id in range(PASS_NUM):
                        fetch_list=[avg_cost, acc_out])
         loss = np.array(outs[0])
         acc = np.array(outs[1])
-        # pass_acc = accuracy.eval(exe)
-        # print pass_acc
-        print loss, acc
+        pass_acc = accuracy.eval(exe)
+        print "pass id : ", pass_id, pass_acc
+        # print loss, acc

-        if loss < 10.0 and acc > 0.9:
-            # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
-            exit(0)
+    pass_acc = accuracy.eval(exe)
+    print "pass id : ", pass_id, pass_acc
+    # if loss < 10.0 and acc > 0.9:
+    #     # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
+    #     exit(0)
 exit(1)
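
The test now exercises the reworked API end to end: evaluator.accuracy(...) returns both the Accuracy object and the per-batch accuracy variable (no separate _update_ops call), and accuracy.eval(exe) is run to report the aggregated accuracy per pass. A short stand-alone sketch of that bookkeeping in plain Python/NumPy, with hypothetical batch results (it mirrors the accumulation logic, not the Paddle executor):

import numpy as np

# Hypothetical per-batch results: (batch size, number of correct predictions).
batches = [(50, 41), (50, 44), (50, 47)]

total = np.int64(0)    # plays the role of the evaluator's "Total" state
correct = np.int64(0)  # plays the role of the evaluator's "Correct" state
for batch_size, batch_correct in batches:
    total += batch_size        # update op: accumulate samples seen
    correct += batch_correct   # update op: accumulate correct predictions

pass_acc = np.float32(correct) / np.float32(total)  # cast + elementwise_div
print("pass id : ", 0, pass_acc)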