BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit c3862a75
Authored on Jul 10, 2018 by tensor-tang

Merge remote-tracking branch 'ups/develop' into feature/libxsmm

Parents: de856da9, ef4895df
Showing 3 changed files with 96 additions and 25 deletions:

- paddle/fluid/operators/conditional_block_op.cc (+4, -3)
- paddle/fluid/operators/merge_lod_tensor_op.cc (+20, -10)
- python/paddle/fluid/tests/test_if_else_op.py (+72, -12)
paddle/fluid/operators/conditional_block_op.cc (+4, -3)

```diff
@@ -205,10 +205,11 @@ class ConditionalBlockGradInferShape : public framework::InferShapeBase {
       context->SetOutputsDim(framework::GradVarName("Params"),
                              context->GetInputsDim("Params"));
     }
-    PADDLE_ENFORCE(context->HasOutputs(framework::GradVarName("X")));
-    context->SetOutputsDim(framework::GradVarName("X"),
-                           context->GetInputsDim("X"));
+    if (context->HasOutputs(framework::GradVarName("X"))) {
+      context->SetOutputsDim(framework::GradVarName("X"),
+                             context->GetInputsDim("X"));
+    }
   }
 };
 
 class ConditionalBlockGradMaker : public framework::SingleGradOpDescMaker {
```
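The change above replaces a hard PADDLE_ENFORCE with a guard: when the backward op has no X@GRAD output registered (for example, because no gradient for X is needed), shape inference now simply skips propagating those dims instead of aborting. A toy Python sketch of the before/after behaviour, using a hypothetical dict-backed stand-in for the real C++ InferShapeContext (illustrative only, not Paddle API):

```python
# Toy model: the "context" is a dict mapping registered output names to dims.

def infer_shape_old(ctx, x_dims):
    # Old behaviour: unconditionally require the X@GRAD output to exist.
    assert "X@GRAD" in ctx, "PADDLE_ENFORCE would abort here"
    ctx["X@GRAD"] = x_dims

def infer_shape_new(ctx, x_dims):
    # New behaviour: propagate dims only if the output was actually registered.
    if "X@GRAD" in ctx:
        ctx["X@GRAD"] = x_dims

ctx_without_xgrad = {}                                 # backward built without a grad for X
infer_shape_new(ctx_without_xgrad, x_dims=[32, 128])   # no-op, no error
# infer_shape_old(ctx_without_xgrad, [32, 128])        # would raise
```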
paddle/fluid/operators/merge_lod_tensor_op.cc (+20, -10)

```diff
@@ -44,8 +44,10 @@ class MergeLoDTensorOp : public framework::OperatorBase {
         scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
     auto level = static_cast<size_t>(Attr<int>("level"));
 
-    auto &mask_dim = mask.dims();
+    PADDLE_ENFORCE(in_true.numel() || in_false.numel(),
+                   "Input(InTrue) or Input(InFalse) should be initialized.");
+    auto &mask_dim = mask.dims();
     std::unique_ptr<framework::LoDTensor> cpu_mask{new framework::LoDTensor()};
     if (platform::is_cpu_place(mask.place())) {
       cpu_mask->ShareDataWith(mask);
@@ -59,19 +61,27 @@ class MergeLoDTensorOp : public framework::OperatorBase {
     }
     auto *mask_data = cpu_mask->data<bool>();
 
-    int rank = in_true.dims().size();
-    platform::Place place = in_true.place();
-    std::type_index data_type = in_true.type();
-    framework::DDim in_true_dims =
-        framework::slice_ddim(in_true.dims(), 1, rank);
-
+    platform::Place place = dev_place;
     int64_t batch_size = in_true.dims()[0] + in_false.dims()[0];
 
-    auto in_true_dim_vec = framework::vectorize(in_true_dims);
-    in_true_dim_vec.insert(in_true_dim_vec.begin(), batch_size);
+    std::type_index data_type =
+        in_true.IsInitialized() ? in_true.type() : in_false.type();
+    int rank;
+    framework::DDim in_dims;
+    if (in_true.IsInitialized()) {
+      rank = in_true.dims().size();
+      in_dims = framework::slice_ddim(in_true.dims(), 1, rank);
+    } else {
+      rank = in_false.dims().size();
+      in_dims = framework::slice_ddim(in_false.dims(), 1, rank);
+    }
+
+    auto in_dim_vec = framework::vectorize(in_dims);
+    in_dim_vec.insert(in_dim_vec.begin(), batch_size);
 
-    framework::DDim out_dims = framework::make_ddim(in_true_dim_vec);
+    framework::DDim out_dims = framework::make_ddim(in_dim_vec);
     out->Resize(out_dims);
     out->mutable_data(place, data_type);
 
     auto *out_lod = out->mutable_lod();
```
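The new logic lets MergeLoDTensorOp handle the case where one of the two branch inputs is empty: the element type and per-sample dims now come from whichever of InTrue/InFalse is initialized, and the output batch dimension is the sum of the two batch sizes. A rough NumPy sketch of just that shape/dtype bookkeeping (a hypothetical helper, not part of the operator or the Paddle API):

```python
import numpy as np

def merged_out_shape_and_dtype(in_true, in_false):
    """Mirror the new dims/dtype selection in MergeLoDTensorOp (sketch only)."""
    if in_true.size == 0 and in_false.size == 0:
        # Corresponds to the new PADDLE_ENFORCE: at least one input must be set.
        raise ValueError("Input(InTrue) or Input(InFalse) should be initialized.")
    src = in_true if in_true.size else in_false        # pick the initialized branch
    batch_size = in_true.shape[0] + in_false.shape[0]
    out_shape = (batch_size,) + src.shape[1:]          # like slice_ddim(dims, 1, rank)
    return out_shape, src.dtype

# Example: the "true" branch received every row, the "false" branch is empty.
in_true = np.random.rand(25, 1).astype(np.float32)
in_false = np.empty((0, 1), dtype=np.float32)
print(merged_out_shape_and_dtype(in_true, in_false))   # ((25, 1), dtype('float32'))
```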
python/paddle/fluid/tests/test_mnist_if_else_op.py → python/paddle/fluid/tests/test_if_else_op.py (+72, -12)

```diff
@@ -14,10 +14,11 @@
 import paddle
 import paddle.fluid.layers as layers
-from paddle.fluid.framework import Program, program_guard, default_main_program, default_startup_program
+from paddle.fluid.framework import Program, program_guard
 from paddle.fluid.executor import Executor
 from paddle.fluid.optimizer import MomentumOptimizer
 import paddle.fluid.core as core
+import paddle.fluid as fluid
 import unittest
 import numpy as np
@@ -31,14 +32,13 @@ class TestMNISTIfElseOp(unittest.TestCase):
         label = layers.data(name='y', shape=[1], dtype='int64')
 
-        limit = layers.fill_constant_batch_size_like(
-            input=label, dtype='int64', shape=[1], value=5.0)
+        limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
         cond = layers.less_than(x=label, y=limit)
         true_image, false_image = layers.split_lod_tensor(
             input=image, mask=cond)
 
         true_out = layers.create_tensor(dtype='float32')
-        true_cond = layers.ConditionalBlock([true_image])
+        true_cond = layers.ConditionalBlock([cond])
 
         with true_cond.block():
             hidden = layers.fc(input=true_image, size=100, act='tanh')
@@ -46,7 +46,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
             layers.assign(input=prob, output=true_out)
 
         false_out = layers.create_tensor(dtype='float32')
-        false_cond = layers.ConditionalBlock([false_image])
+        false_cond = layers.ConditionalBlock([cond])
 
         with false_cond.block():
             hidden = layers.fc(input=false_image, size=200, act='tanh')
@@ -64,7 +64,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
         train_reader = paddle.batch(
             paddle.reader.shuffle(
                 paddle.dataset.mnist.train(), buf_size=8192),
-            batch_size=200)
+            batch_size=10)
 
         place = core.CPUPlace()
         exe = Executor(place)
@@ -94,8 +94,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
         label = layers.data(name='y', shape=[1], dtype='int64')
 
-        limit = layers.fill_constant_batch_size_like(
-            input=label, dtype='int64', shape=[1], value=5.0)
+        limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
         cond = layers.less_than(x=label, y=limit)
         ie = layers.IfElse(cond)
@@ -125,7 +124,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
         place = core.CPUPlace()
         exe = Executor(place)
 
-        exe.run(kwargs['startup_program'])
+        exe.run(startup_prog)
         PASS_NUM = 100
         for pass_id in range(PASS_NUM):
             for data in train_reader():
@@ -133,7 +132,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
                 y_data = np.array(map(lambda x: x[1], data)).astype("int64")
                 y_data = y_data.reshape((y_data.shape[0], 1))
 
-                outs = exe.run(kwargs['main_program'],
+                outs = exe.run(prog,
                                feed={'x': x_data,
                                      'y': y_data},
                                fetch_list=[avg_loss])
@@ -143,6 +142,67 @@ class TestMNISTIfElseOp(unittest.TestCase):
                 self.assertFalse(True)
 
 
+class TestIfElse(unittest.TestCase):
+    def set_test_case(self):
+        # condiction is: self.data < self.cond_value
+        self.cond_value = 0.5
+        self.data = np.random.rand(25, 1).astype(np.float32)
+
+    def compare_ifelse_op_and_numpy(self, place):
+        self.set_test_case()
+
+        prog = Program()
+        startup_prog = Program()
+        with program_guard(prog, startup_prog):
+            src = layers.data(name='data', shape=[1], dtype='float32')
+            cond = layers.fill_constant(
+                [1], dtype='float32', value=self.cond_value)
+            ifcond = layers.less_than(x=src, y=cond)
+            ie = layers.IfElse(ifcond)
+            with ie.true_block():
+                true_target = ie.input(src)
+                ie.output(true_target)
+
+            with ie.false_block():
+                false_target = ie.input(src)
+                ie.output(false_target)
+            if_out = ie()
+            out = layers.reduce_sum(if_out)
+
+            exe = fluid.Executor(place)
+            exe.run(fluid.default_startup_program())
+            fetch_list = [out]
+            o1, = exe.run(fluid.default_main_program(),
+                          feed={'data': self.data},
+                          fetch_list=[out])
+            o2 = np.sum(self.data)
+            self.assertTrue(
+                np.allclose(
+                    o1, o2, atol=1e-8),
+                "IfElse result : " + str(o1) + "\n Numpy result :" + str(o2))
+
+    def test_cpu(self):
+        self.compare_ifelse_op_and_numpy(fluid.CPUPlace())
+
+    def test_cuda(self):
+        if not core.is_compiled_with_cuda():
+            return
+        self.compare_ifelse_op_and_numpy(fluid.CUDAPlace(0))
+
+
+class TestIfElseTrueBranch(TestIfElse):
+    def set_test_case(self):
+        # condiction is: self.data < self.cond_value
+        self.cond_value = 10.
+        self.data = np.random.rand(25, 1).astype(np.float32)
+
+
+class TestIfElseFalseBranch(TestIfElse):
+    def set_test_case(self):
+        # condiction is: self.data < self.cond_value
+        self.cond_value = -10.
+        self.data = np.random.rand(25, 1).astype(np.float32)
+
+
 if __name__ == '__main__':
     # temp disable if else unittest since it could be buggy.
     exit(0)
     unittest.main()
```
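For reference, the core of the new TestIfElse case boils down to the following usage pattern of the legacy fluid.layers.IfElse API (condensed from the test above into a standalone script; it routes every row of `data` through one of the two identity branches and checks that the merged sum is unchanged):

```python
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers

data = np.random.rand(25, 1).astype(np.float32)
cond_value = 0.5

src = layers.data(name='data', shape=[1], dtype='float32')
cond = layers.fill_constant([1], dtype='float32', value=cond_value)
ifcond = layers.less_than(x=src, y=cond)

ie = layers.IfElse(ifcond)
with ie.true_block():
    true_target = ie.input(src)    # rows where data < cond_value
    ie.output(true_target)
with ie.false_block():
    false_target = ie.input(src)   # the remaining rows
    ie.output(false_target)
if_out = ie()                      # both branches are merged back together
out = layers.reduce_sum(if_out)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
o1, = exe.run(fluid.default_main_program(),
              feed={'data': data},
              fetch_list=[out])
assert np.allclose(o1, np.sum(data), atol=1e-8)
```

Since each branch is the identity, the merged output is just a LoD-split-and-merge round trip of the input, which is exactly why the test compares against np.sum of the raw data.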