Commit ba981604: "fix split"
Author: JiabinYang, committed Jan 25, 2019
Parent: 3be8ffad

Showing 5 changed files with 322 additions and 22 deletions (+322, -22)
paddle/fluid/framework/operator.cc                               +10  -11
python/paddle/fluid/imperative/nn.py                              +2  -10
python/paddle/fluid/tests/unittests/test_imperative.py            +0   -1
python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py  +265   -0
python/paddle/fluid/tests/unittests/test_imperative_split.py     +45   -0
paddle/fluid/framework/operator.cc:

```diff
@@ -555,18 +555,17 @@ Tensor* ExecutionContext::LegacyOutput<Tensor>(const std::string& name) const {
 template <>
 std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
     const std::string& name) const {
-  auto names = op().Outputs(name);
+  auto it = ctx_.outputs.find(name);
+  if (it == ctx_.outputs.end()) {
+    return {};
+  }
+  const std::vector<Variable*>& vars = it->second;
   std::vector<Tensor*> res;
-  res.reserve(names.size());
-  std::transform(names.begin(), names.end(), std::back_inserter(res),
-                 [&](const std::string& sub_name) -> Tensor* {
-                   auto var = scope_.FindVar(sub_name);
-                   if (var == nullptr) return nullptr;
-                   PADDLE_ENFORCE(var->IsType<LoDTensor>(),
-                                  "%s should be LoDTensor, but the received type is %s",
-                                  sub_name, ToTypeName(var->Type()));
-                   return var->GetMutable<LoDTensor>();
-                 });
+  res.reserve(vars.size());
+  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
+                 [&](Variable* var) -> Tensor* {
+                   return var == nullptr ? nullptr : var->GetMutable<LoDTensor>();
+                 });
   return res;
 }
```
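The net effect of this hunk: `MultiOutput<Tensor>` now resolves its output variables from the execution context's pre-bound `ctx_.outputs` map instead of re-looking each output name up in the scope, returns an empty vector when the name is absent, and maps null variables to null tensors instead of type-enforcing per name. A rough sketch of the new lookup in Python terms (hypothetical names, illustration only, not Paddle API):

```python
def multi_output(ctx_outputs, name):
    # ctx_outputs plays the role of ctx_.outputs: a dict from output name
    # to the list of variables already bound for this op execution.
    vars_ = ctx_outputs.get(name)
    if vars_ is None:
        return []  # name not produced by this op: empty result
    # Null variables pass through as None instead of raising.
    return [None if v is None else v for v in vars_]

# Example: an op with two bound outputs for "Out" and none for "Other".
outs = {"Out": ["tensor_a", None]}
assert multi_output(outs, "Out") == ["tensor_a", None]
assert multi_output(outs, "Other") == []
```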
python/paddle/fluid/imperative/nn.py:

```diff
@@ -22,13 +22,7 @@ from . import layers
 from ..framework import Variable, OpProtoHolder
 from ..param_attr import ParamAttr
 from ..initializer import Normal, Constant
 
-__all__ = [
-    'Conv2D',
-    'Pool2D',
-    'FC',
-    'BatchNorm',
-    'EMBEDDING',
-]
+__all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'EMBEDDING']
 
 
 class Conv2D(layers.Layer):
@@ -419,8 +413,6 @@ class BatchNorm(layers.Layer):
         # Currently, we don't support inplace in imperative mode
         return self._helper.append_activation(batch_norm_out)
-        outputs={'Out': [bias_out]},
-
 
 
 class EMBEDDING(layers.Layer):
@@ -438,7 +430,7 @@ class EMBEDDING(layers.Layer):
         self._is_distributed = is_distributed
-        self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
-            size[0] + padding_idx)
+        self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
+            size[0] + padding_idx)
         self._param_attr = param_attr
         self._dtype = dtype
```
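The `EMBEDDING` hunk is a pure re-wrap of the `_padding_idx` expression (both sides read the same once whitespace changes are hidden), but the expression itself merits a gloss: `None` maps to the sentinel `-1`, a non-negative index is kept as-is, and a negative index wraps Python-style against the vocabulary size `size[0]`. A standalone restatement (plain Python, function name hypothetical):

```python
def normalize_padding_idx(padding_idx, vocab_size):
    # Restates the conditional in EMBEDDING.__init__:
    # None -> -1 sentinel, non-negative kept, negative wrapped from the end.
    if padding_idx is None:
        return -1
    return padding_idx if padding_idx >= 0 else vocab_size + padding_idx


assert normalize_padding_idx(None, 1000) == -1
assert normalize_padding_idx(5, 1000) == 5
assert normalize_padding_idx(-1, 1000) == 999
```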
python/paddle/fluid/tests/unittests/test_imperative.py:

```diff
@@ -338,7 +338,6 @@ class TestImperative(unittest.TestCase):
             dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
 
         with new_program_scope():
-            print("im here")
             inp = fluid.layers.data(
                 name="inp", shape=[1, 4, 3], append_batch_size=False)
             simple_rnn = SimpleRNN()
```
python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py (new file, mode 100644):

```python
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle.fluid as fluid
from paddle.fluid.imperative.nn import EMBEDDING
import paddle.fluid.framework as framework
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.imperative.base import to_variable
import numpy as np
from paddle.fluid.backward import append_backward


class SimpleLSTMRNN(fluid.imperative.Layer):
    def __init__(self,
                 hidden_size,
                 num_steps,
                 num_layers=2,
                 init_scale=0.1,
                 dropout=None):
        super(SimpleLSTMRNN, self).__init__()
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._init_scale = init_scale
        self._dropout = dropout
        self.input = None
        self.num_steps = num_steps

    def _build_once(self, input_embedding, init_hidden=None, init_cell=None):
        self.weight_1_arr = []
        self.weight_2_arr = []
        self.bias_arr = []
        self.hidden_array = []
        self.cell_array = []
        self.mask_array = []

        for i in range(self._num_layers):
            weight_1 = fluid.layers.create_parameter(
                shape=[self._hidden_size * 2, self._hidden_size * 4],
                dtype="float32",
                name="fc_weight1_" + str(i),
                default_initializer=fluid.initializer.UniformInitializer(
                    low=-self._init_scale, high=self._init_scale))
            self.weight_1_arr.append(weight_1)
            bias_1 = fluid.layers.create_parameter(
                [self._hidden_size * 4],
                dtype="float32",
                name="fc_bias1_" + str(i),
                default_initializer=fluid.initializer.Constant(0.0))
            self.bias_arr.append(bias_1)

            pre_hidden = fluid.layers.slice(
                init_hidden, axes=[0], starts=[i], ends=[i + 1])
            pre_cell = fluid.layers.slice(
                init_cell, axes=[0], starts=[i], ends=[i + 1])
            pre_hidden = fluid.layers.reshape(
                pre_hidden, shape=[-1, self._hidden_size])
            pre_cell = fluid.layers.reshape(
                pre_cell, shape=[-1, self._hidden_size])
            self.hidden_array.append(pre_hidden)
            self.cell_array.append(pre_cell)

    def forward(self, input_embedding, init_hidden=None, init_cell=None):
        res = []
        for index in range(self.num_steps):
            self.input = fluid.layers.slice(
                input_embedding, axes=[1], starts=[index], ends=[index + 1])
            self.input = fluid.layers.reshape(
                self.input, shape=[-1, self._hidden_size])
            for k in range(self._num_layers):
                pre_hidden = self.hidden_array[k]
                print("pre_hidden shape is:{}".format(pre_hidden.shape))
                print("input shape is:{}".format(self.input.shape))
                pre_cell = self.cell_array[k]
                weight_1 = self.weight_1_arr[k]
                bias = self.bias_arr[k]

                nn = fluid.layers.concat([self.input, pre_hidden], 1)
                gate_input = fluid.layers.matmul(x=nn, y=weight_1)

                gate_input = fluid.layers.elementwise_add(gate_input, bias)
                print("gate_input shape is: {}".format(gate_input.shape))
                print("gate_input value is :{}".format(gate_input._numpy()))
                print("gate_input desc is :{}".format(gate_input))
                # i, j, f, o = fluid.layers.split(gate_input, num_or_sections=4, dim=-1)
                # #
                # # c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
                # #     i) * fluid.layers.tanh(j)
                # # m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
                # #
                # # self.hidden_array[k] = m
                # # self.cell_array[k] = c
                # # self.input = m
                # #
                # # if self.dropout is not None and self.dropout > 0.0:
                # #     self.input = fluid.layers.dropout(
                # #         self.input,
                # #         dropout_prob=self.dropout,
                # #         dropout_implementation='upscale_in_train')
                # #
                # # res.append(
                # #     fluid.layers.reshape(
                # #         input, shape=[1, -1, self._hidden_size]))
                # # real_res = fluid.layers.concat(res, 0)
                # # real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
                # # last_hidden = fluid.layers.concat(self.hidden_array, 1)
                # # last_hidden = fluid.layers.reshape(
                # #     last_hidden, shape=[-1, self._num_layers, self._hidden_size])
                # # last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
                # # last_cell = fluid.layers.concat(self.cell_array, 1)
                # # last_cell = fluid.layers.reshape(
                # #     last_cell, shape=[-1, self._num_layers, self._hidden_size])
                # # last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
                # #
                # return real_res, last_hidden, last_cell
        return [1], [2], [3]


class PtbModel(fluid.imperative.Layer):
    def __init__(self,
                 hidden_size,
                 vocab_size,
                 num_layers=2,
                 num_steps=20,
                 init_scale=0.1,
                 dropout=None):
        super(PtbModel, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.init_scale = init_scale
        self.num_layers = num_layers
        self.num_steps = num_steps
        self.dropout = dropout
        self.simple_lstm_rnn = SimpleLSTMRNN(
            hidden_size,
            num_steps,
            num_layers=num_layers,
            init_scale=init_scale,
            dropout=dropout)
        self.embedding = EMBEDDING(
            size=[vocab_size, hidden_size],
            dtype='float32',
            is_sparse=False,
            param_attr=fluid.ParamAttr(
                name='embedding_para',
                initializer=fluid.initializer.UniformInitializer(
                    low=-init_scale, high=init_scale)))

    def _build_once(self, input, label, init_hidden, init_cell):
        self.softmax_weight = fluid.layers.create_parameter(
            [self.hidden_size, self.vocab_size],
            dtype="float32",
            name="softmax_weight",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))
        self.softmax_bias = fluid.layers.create_parameter(
            [self.vocab_size],
            dtype="float32",
            name='softmax_bias',
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))

    def forward(self, input, label, init_hidden, init_cell):
        init_h = fluid.layers.reshape(
            init_hidden, shape=[self.num_layers, -1, self.hidden_size])
        init_c = fluid.layers.reshape(
            init_cell, shape=[self.num_layers, -1, self.hidden_size])
        x_emb = self.embedding(input)
        x_emb = fluid.layers.reshape(
            x_emb, shape=[-1, self.num_steps, self.hidden_size])
        if self.dropout is not None and self.dropout > 0.0:
            x_emb = fluid.layers.dropout(
                x_emb,
                dropout_prob=self.drop_out,
                dropout_implementation='upscale_in_train')
        print("init_c is {}".format(init_c))
        rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
                                                               init_c)
        rnn_out = fluid.layers.reshape(
            rnn_out, shape=[-1, self.num_steps, self.hidden_size])
        projection = fluid.layers.reshape(rnn_out, self.softmax_weight)
        projection = fluid.layers.elementwise_add(projection,
                                                  self.softmax_bias)
        projection = fluid.layers.reshape(
            projection, shape=[-1, self.vocab_size])
        projection = fluid.layers.reshape(
            projection, shape=[-1, self.vocab_size])
        loss = fluid.layers.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False)
        loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
        loss = fluid.layers.reduce_mean(loss, dim=[0])
        loss = fluid.layers.reduce_sum(loss)
        loss.permissions = True
        return loss, last_hidden, last_cell


class TestImperativePtbRnn(unittest.TestCase):
    def test_mnist_cpu_float32(self):
        seed = 90
        hidden_size = 10
        vocab_size = 1000
        num_layers = 1
        num_steps = 3
        init_scale = 0.1
        batch_size = 4

        with fluid.imperative.guard():
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            # TODO: marsyang1993 Change seed to
            ptb_model = PtbModel(
                hidden_size=hidden_size,
                vocab_size=vocab_size,
                num_layers=num_layers,
                num_steps=num_steps,
                init_scale=init_scale)
            sgd = SGDOptimizer(learning_rate=1e-3)
            print("q")
            for i in range(2):
                x_data = np.arange(12).reshape(4, 3).astype('int64')
                y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
                x_data = x_data.reshape((-1, num_steps, 1))
                y_data = y_data.reshape((-1, 1))
                init_hidden_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32')
                init_cell_data = np.zeros(
                    (num_layers, batch_size, hidden_size), dtype='float32')
                x = to_variable(x_data)
                y = to_variable(y_data)
                init_hidden = to_variable(init_hidden_data)
                init_cell = to_variable(init_cell_data)
                dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
                                                            init_cell)
                dy_param_init = dict()
                if i == 0:
                    for param in fluid.default_main_program().global_block(
                    ).all_parameters():
                        dy_param_init[param.name] = param._numpy()
                dy_loss._backward()
                sgd.minimize(dy_loss)
                dy_param_updated = dict()
                for param in fluid.default_main_program().global_block(
                ).all_parameters():
                    dy_param_updated[param.name] = param._numpy()


if __name__ == '__main__':
    unittest.main()
```
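Note that this commit lands the file in a visibly work-in-progress state: the LSTM cell body is commented out pending the `split` fix, `forward` returns placeholder lists, debug prints remain, and the projection step uses `reshape` where a matrix multiply appears intended. The commented block encodes a standard LSTM step over the four gates produced by `fluid.layers.split(gate_input, num_or_sections=4, dim=-1)`; a standalone numpy restatement of that step (illustration only, helper names hypothetical):

```python
import numpy as np


def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))


def lstm_cell_step(gate_input, pre_cell):
    # gate_input: [batch, 4 * hidden]; split into the i, j, f, o gates,
    # matching fluid.layers.split(gate_input, num_or_sections=4, dim=-1).
    i, j, f, o = np.split(gate_input, 4, axis=-1)
    c = pre_cell * sigmoid(f) + sigmoid(i) * np.tanh(j)  # new cell state
    m = np.tanh(c) * sigmoid(o)                          # new hidden state
    return m, c


hidden = 10
m, c = lstm_cell_step(
    np.random.randn(4, 4 * hidden).astype('float32'),
    np.zeros((4, hidden), dtype='float32'))
assert m.shape == (4, hidden) and c.shape == (4, hidden)
```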
python/paddle/fluid/tests/unittests/test_imperative_split.py (new file, mode 100644):

```python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle.fluid as fluid
from paddle.fluid.imperative.base import to_variable
import numpy as np


class Split_test(fluid.imperative.Layer):
    def __init__(self):
        super(Split_test, self).__init__()

    def _build_once(self, input):
        pass

    def forward(self, input):
        out = fluid.layers.split(input, num_or_sections=4, dim=-1)
        return out


class TestImperativePtbRnn(unittest.TestCase):
    def test_spilt(self):
        with fluid.imperative.guard():
            inp = to_variable(np.arange(160).reshape(4, 40).astype('float32'))
            st = Split_test()
            out = st(inp)
            print(out)


if __name__ == '__main__':
    unittest.main()
```
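As a sanity reference for what this test should print: splitting the (4, 40) input into 4 sections along the last dimension yields four (4, 10) tensors. The same semantics restated in numpy (illustration only):

```python
import numpy as np

inp = np.arange(160).reshape(4, 40).astype('float32')
# Same semantics as fluid.layers.split(input, num_or_sections=4, dim=-1):
# four equal sections along the last axis.
parts = np.split(inp, 4, axis=-1)
assert len(parts) == 4
assert all(p.shape == (4, 10) for p in parts)
```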