Commit 361dc27a
Authored on Feb 21, 2017 by qiaolongfei

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into topology

Parents: 07539b2a 14ee4b80
Showing 18 changed files with 339 additions and 78 deletions (+339, -78):
demo/sentiment/dataprovider.py  +2 -0
demo/sentiment/predict.py  +5 -1
doc/api/trainer_config_helpers/layers.rst  +6 -0
doc/design/reader/README.md  +3 -2
paddle/gserver/evaluators/Evaluator.cpp  +13 -43
paddle/gserver/layers/PrintLayer.cpp  +8 -29
paddle/gserver/layers/SequenceConcatLayer.cpp  +4 -2
paddle/parameter/Argument.cpp  +38 -0
paddle/parameter/Argument.h  +17 -0
python/CMakeLists.txt  +1 -0
python/paddle/reader/__init__.py  +23 -0
python/paddle/reader/decorator.py  +60 -0
python/paddle/reader/tests/CMakeLists.txt  +4 -0
python/paddle/reader/tests/decorator_test.py  +50 -0
python/paddle/trainer_config_helpers/layers.py  +55 -0
python/paddle/trainer_config_helpers/tests/configs/file_list.sh  +2 -1
python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_concat.protostr  +39 -0
python/paddle/trainer_config_helpers/tests/configs/test_seq_concat.py  +9 -0
demo/sentiment/dataprovider.py

@@ -32,4 +32,6 @@ def process(settings, file_name):
         word_slot = [
             settings.word_dict[w] for w in words if w in settings.word_dict
         ]
+        if not word_slot:
+            continue
         yield word_slot, label

demo/sentiment/predict.py

@@ -138,7 +138,11 @@ def main():
     batch = []
     for line in sys.stdin:
-        batch.append([predict.get_index(line)])
+        words = predict.get_index(line)
+        if words:
+            batch.append([words])
+        else:
+            print('All the words in [%s] are not in the dictionary.' % line)
         if len(batch) == batch_size:
             predict.batch_predict(batch)
             batch = []

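The predict.py change above skips input lines whose words are all out of vocabulary, so empty index lists never reach batch prediction. As a rough, self-contained sketch of that filter-then-batch pattern (the `lookup` and `predict_batch` callables below are hypothetical stand-ins, not part of Paddle's API):

```python
def batched_predict(lines, lookup, predict_batch, batch_size=128):
    """Collect word-index lists into fixed-size batches, skipping lines
    whose words are all out of vocabulary."""
    batch = []
    for line in lines:
        words = lookup(line)  # list of word indices, possibly empty
        if not words:
            print('All the words in [%s] are not in the dictionary.' % line)
            continue
        batch.append([words])
        if len(batch) == batch_size:
            predict_batch(batch)
            batch = []
    if batch:  # flush the remainder (not shown in the hunk above)
        predict_batch(batch)
```
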
doc/api/trainer_config_helpers/layers.rst

@@ -279,6 +279,12 @@ concat_layer
     :members: concat_layer
     :noindex:

+seq_concat_layer
+----------------
+..  automodule:: paddle.trainer_config_helpers.layers
+    :members: seq_concat_layer
+    :noindex:
+
 Reshaping Layers
 ================

doc/design/reader/README.md

@@ -107,7 +107,7 @@ We decided to use dictionary (`{"image":0, "label":1}`) instead of list (`["imag
 ### How to create custom data reader
 ```python
-def image_reader(image_path, label_path):
+def image_reader(image_path, label_path, n):
     f = open(image_path)
     l = open(label_path)
     images = numpy.fromfile(
@@ -117,9 +117,10 @@ def image_reader(image_path, label_path):
     for i in xrange(n):
         yield images[i, :], labels[i]  # a single entry of data is created each time
     f.close()
     l.close()

 # use python lambda to change image_reader into a function with no parameters.
-reader = lambda : image_reader("/path/to/image_file", "/path/to/label_file")
+reader = lambda : image_reader("/path/to/image_file", "/path/to/label_file", 1024)
 paddle.train(reader, {"image":0, "label":1}, ...)
 ```

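The reader design shown above boils down to two conventions: a reader is any function that yields one data entry at a time, and its parameters are bound up front so the framework can call it with no arguments. A minimal, self-contained sketch with synthetic data (names are illustrative; no Paddle calls involved):

```python
import random


def synthetic_reader(n, seed=0):
    """A data reader: yields (features, label) pairs, n entries in total."""
    rng = random.Random(seed)
    for i in range(n):
        yield [rng.random() for _ in range(4)], i % 2


# Bind the parameters with a lambda so the result takes no arguments,
# mirroring the image_reader example in the design doc.
reader = lambda: synthetic_reader(1024)

for features, label in reader():
    pass  # each iteration produces a single entry of data
```
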
paddle/gserver/evaluators/Evaluator.cpp

@@ -866,21 +866,20 @@ void PnpairEvaluator::calc(std::vector<PredictionResult>& predictArray) {
 ClassRegistrar<Evaluator> Evaluator::registrar_;

 Evaluator* Evaluator::create(const EvaluatorConfig& config) {
-  Evaluator* evaluator = nullptr;
-  if (config.type() == "classification_error") {
-    evaluator = new ClassificationErrorEvaluator();
-  } else if (config.type() == "sum") {
-    evaluator = new SumEvaluator();
-  } else if (config.type() == "last-column-sum") {
-    evaluator = new ColumnSumEvaluator(-1);
-  } else if (config.type() == "last-column-auc") {
-    evaluator = new AucEvaluator(-1);
-  } else {
-    evaluator = registrar_.createByType(config.type());
-  }
+  Evaluator* evaluator = registrar_.createByType(config.type());
   evaluator->init(config);
   return evaluator;
 }

+REGISTER_EVALUATOR(classification_error, ClassificationErrorEvaluator);
+REGISTER_EVALUATOR(sum, SumEvaluator);
+
+static InitFunction __reg_type_auc_sum__([]() {
+  Evaluator::registrar_.registerClass(
+      "last-column-sum", [] { return new ColumnSumEvaluator(-1); });
+  Evaluator::registrar_.registerClass(
+      "last-column-auc", [] { return new AucEvaluator(-1); });
+});
+
 /**
  * @brief print value of each layer.
  *
@@ -888,32 +887,10 @@ Evaluator* Evaluator::create(const EvaluatorConfig& config) {
  */
 class ValuePrinter : public Evaluator {
 public:
   ValuePrinter() {}

   virtual void eval(const NeuralNetwork& nn) {
     for (const std::string& name : config_.input_layers()) {
-      const Argument& argu = nn.getLayer(name)->getOutput();
-      if (argu.value) {
-        std::ostringstream os;
-        argu.value->print(os);
-        LOG(INFO) << "layer=" << name << " value matrix:\n" << os.str();
-      }
-      if (argu.ids) {
-        std::ostringstream os;
-        argu.ids->print(os, argu.ids->getSize());
-        LOG(INFO) << "layer=" << name << " ids vector:\n" << os.str();
-      }
-      if (auto startPos = argu.sequenceStartPositions) {
-        std::ostringstream os;
-        startPos->getVector(false)->print(os, startPos->getSize());
-        LOG(INFO) << "layer=" << name << " sequence pos vector:\n" << os.str();
-      }
-      if (auto subStartPos = argu.subSequenceStartPositions) {
-        std::ostringstream os;
-        subStartPos->getVector(false)->print(os, subStartPos->getSize());
-        LOG(INFO) << "layer=" << name << " sub-sequence pos vector:\n"
-                  << os.str();
-      }
+      nn.getLayer(name)->getOutput().printValueString(LOG(INFO),
+                                                      "layer=" + name + " ");
     }
   }
@@ -929,8 +906,6 @@ REGISTER_EVALUATOR(value_printer, ValuePrinter);
  */
 class GradientPrinter : public Evaluator {
 public:
   GradientPrinter() {}

   virtual void eval(const NeuralNetwork& nn) {
     for (const std::string& name : config_.input_layers()) {
       const Argument& argu = nn.getLayer(name)->getOutput();
@@ -939,11 +914,6 @@ public:
       argu.grad->print(os);
       LOG(INFO) << "layer=" << name << " grad matrix:\n" << os.str();
     }
-    if (auto startPos = argu.sequenceStartPositions) {
-      std::ostringstream os;
-      startPos->getVector(false)->print(os, startPos->getSize());
-      LOG(INFO) << "layer=" << name << " sequence pos vector:\n" << os.str();
-    }
   }
 }

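The Evaluator.cpp hunks above drop the hard-coded if/else chain in Evaluator::create and route every evaluator type through the class registrar, with the built-in types registered once via REGISTER_EVALUATOR and an InitFunction. A minimal Python sketch of that registry pattern (illustrative only; this is not Paddle's actual ClassRegistrar API):

```python
class Registrar(object):
    """Maps a type name to a factory callable."""

    def __init__(self):
        self._factories = {}

    def register_class(self, type_name, factory):
        self._factories[type_name] = factory

    def create_by_type(self, type_name):
        if type_name not in self._factories:
            raise KeyError('unknown evaluator type: %s' % type_name)
        return self._factories[type_name]()


class SumEvaluator(object):
    pass


registrar = Registrar()

# Registration happens once, next to each class definition...
registrar.register_class('sum', SumEvaluator)
registrar.register_class('last-column-sum', lambda: SumEvaluator())

# ...and creation no longer needs an if/else chain over type names.
evaluator = registrar.create_by_type('sum')
```
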
paddle/gserver/layers/PrintLayer.cpp

@@ -19,38 +19,17 @@ namespace paddle {
 class PrintLayer : public Layer {
 public:
   explicit PrintLayer(const LayerConfig& config) : Layer(config) {}
-  void forward(PassType passType) override;
-  void backward(const UpdateCallback& callback) override {}
-};
-
-void PrintLayer::forward(PassType passType) {
-  Layer::forward(passType);
-  for (size_t i = 0; i != inputLayers_.size(); ++i) {
-    const auto& argu = getInput(i);
-    const std::string& name = inputLayers_[i]->getName();
-    if (argu.value) {
-      std::ostringstream os;
-      argu.value->print(os);
-      LOG(INFO) << "layer=" << name << " value matrix:\n" << os.str();
-    }
-    if (argu.ids) {
-      std::ostringstream os;
-      argu.ids->print(os, argu.ids->getSize());
-      LOG(INFO) << "layer=" << name << " ids vector:\n" << os.str();
-    }
-    if (auto startPos = argu.sequenceStartPositions) {
-      std::ostringstream os;
-      startPos->getVector(false)->print(os, startPos->getSize());
-      LOG(INFO) << "layer=" << name << " sequence pos vector:\n" << os.str();
-    }
-    if (auto subStartPos = argu.subSequenceStartPositions) {
-      std::ostringstream os;
-      subStartPos->getVector(false)->print(os, subStartPos->getSize());
-      LOG(INFO) << "layer=" << name << " sub-sequence pos vector:\n" << os.str();
-    }
-  }
-}
+  void forward(PassType passType) override {
+    Layer::forward(passType);
+    for (size_t i = 0; i != inputLayers_.size(); ++i) {
+      getInput(i).printValueString(LOG(INFO),
+                                   "layer=" + inputLayers_[i]->getName() + " ");
+    }
+  }
+
+  void backward(const UpdateCallback& callback) override {}
+};

 REGISTER_LAYER(print, PrintLayer);

paddle/gserver/layers/SequenceConcatLayer.cpp

@@ -21,9 +21,11 @@ namespace paddle {
 /**
  * A layer for concatenating the first sequence with the second sequence
  * following the first
- * Input: two sequences each containing some instances
+ * Input: two sequences each containing the same number of instances
+ *   seq1 = [a1, a2, ..., an]
+ *   seq2 = [b1, b2, ..., bn]
  * Output: a concatenated sequence of the two input sequences
+ *   out = [a1, b1, a2, b2, ..., an, bn]
  */
 class SequenceConcatLayer : public Layer {

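For intuition, the interleaving described in the comment can be sketched in plain Python, where `seq1[i]` and `seq2[i]` stand for the i-th sequence of the two inputs (purely illustrative, lists instead of Paddle sequences):

```python
def seq_concat(seq1, seq2):
    """Interleave two inputs with the same number of sequences:
    [a1, ..., an] and [b1, ..., bn] -> [a1, b1, a2, b2, ..., an, bn]."""
    assert len(seq1) == len(seq2)
    out = []
    for a, b in zip(seq1, seq2):
        out.extend([a, b])
    return out


print(seq_concat(['a1', 'a2', 'a3'], ['b1', 'b2', 'b3']))
# ['a1', 'b1', 'a2', 'b2', 'a3', 'b3']
```
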
paddle/parameter/Argument.cpp

@@ -602,6 +602,44 @@ void Argument::degradeSequence(const Argument& input, bool useGpu) {
   tgtBuf[numSequences] = numSubSequences;
 }

+void Argument::getValueString(
+    std::unordered_map<std::string, std::string>* out) const {
+  if (value) {
+    std::ostringstream os;
+    value->print(os);
+    out->insert({"value", os.str()});
+  }
+  if (ids) {
+    std::ostringstream os;
+    ids->print(os, ids->getSize());
+    out->insert({"ids", os.str()});
+  }
+  if (sequenceStartPositions) {
+    std::ostringstream os;
+    sequenceStartPositions->getVector(false)->print(
+        os, sequenceStartPositions->getSize());
+    out->insert({"sequence pos", os.str()});
+  }
+  if (subSequenceStartPositions) {
+    std::ostringstream os;
+    subSequenceStartPositions->getVector(false)->print(
+        os, subSequenceStartPositions->getSize());
+    out->insert({"sub-sequence pos", os.str()});
+  }
+}
+
+void Argument::printValueString(std::ostream& stream,
+                                const std::string& prefix) const {
+  std::unordered_map<std::string, std::string> out;
+  getValueString(&out);
+  for (auto field : {"value", "id", "sequence pos", "sub-sequence pos"}) {
+    auto it = out.find(field);
+    if (it != out.end()) {
+      stream << prefix << field << ":\n" << it->second;
+    }
+  }
+}
+
 void Argument::subArgFrom(const Argument& input,
                           size_t offset,
                           size_t height,

paddle/parameter/Argument.h

@@ -297,6 +297,23 @@ struct Argument {
      sequence has sub-sequence degrades to a sequence.
   */
   void degradeSequence(const Argument& input, bool useGpu);

+  /**
+   * @brief getValueString will return the argument's output in string. There
+   * are several kinds of output. The keys of output dictionary are 'value',
+   * 'id', 'sequence pos', 'sub-sequence pos'.
+   * @param out [out]: the return values.
+   */
+  void getValueString(
+      std::unordered_map<std::string, std::string>* out) const;
+
+  /**
+   * @brief printValueString will print the argument's output in order of
+   * 'value', 'id', 'sequence pos', 'sub-sequence pos'.
+   * @param stream: Output stream
+   * @param prefix: line prefix for printing.
+   */
+  void printValueString(std::ostream& stream,
+                        const std::string& prefix = "") const;
 };

 }  // namespace paddle

python/CMakeLists.txt

@@ -24,6 +24,7 @@ add_custom_target(paddle_python ALL DEPENDS
     ${OUTPUT_DIR}/.timestamp)

 add_subdirectory(paddle/trainer_config_helpers/tests)
+add_subdirectory(paddle/reader/tests)

 install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/dist/
     DESTINATION opt/paddle/share/wheels

python/paddle/reader/__init__.py (new file, mode 100644)

# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# It would be too lengthy to require our users to prefix decorators with `decorator`.
# For example, we want the following line
#
# r = paddle.reader.decorator.buffered(paddle.reader.creator.text("hello.txt"))
#
# to be a shorter version:
#
# r = paddle.reader.buffered(paddle.reader.creator.text("hello.txt"))
from decorator import *

python/paddle/reader/decorator.py (new file, mode 100644)

# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['buffered']

from Queue import Queue
from threading import Thread


def buffered(reader, size):
    """Creates a buffered data reader.

    The buffered data reader will read and save data entries into a buffer.
    Reading from the buffered data reader will proceed as long as the buffer
    is not empty.

    Args:
        reader: the data reader to read from.
        size: max buffer size.

    Returns:
        The buffered data reader.
    """

    class EndSignal():
        pass

    end = EndSignal()

    def read_worker(r, q):
        for d in r:
            q.put(d)
        q.put(end)

    def create_reader():
        r = reader()
        q = Queue(maxsize=size)
        t = Thread(
            target=read_worker, args=(
                r,
                q, ))
        t.daemon = True
        t.start()
        e = q.get()
        while e != end:
            yield e
            e = q.get()

    return create_reader

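A quick usage sketch of the buffered decorator defined above, assuming the new package is importable as paddle.reader (the slow reader below is synthetic and only for illustration): the daemon thread pre-fetches entries into the queue while the consumer is still working, so production and consumption overlap.

```python
import time

import paddle.reader


def slow_reader():
    """A reader whose entries each take about 10 ms to produce."""
    for i in range(100):
        time.sleep(0.01)
        yield i


# buffered() returns a new reader creator; calling it starts a background
# thread that keeps up to 32 entries queued ahead of the consumer.
fast = paddle.reader.buffered(lambda: slow_reader(), 32)

for entry in fast():
    pass  # consume entries while the next ones are being prefetched
```
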
python/paddle/reader/tests/CMakeLists.txt (new file, mode 100644)

add_test(NAME reader_decorator_test
  COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/
    ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/reader/tests/decorator_test.py
  WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle)

python/paddle/reader/tests/decorator_test.py (new file, mode 100644)

# Copyright PaddlePaddle contributors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.reader
import time


def reader_10(dur):
    for i in range(10):
        time.sleep(dur)
        yield i


class TestBuffered(unittest.TestCase):
    def test_read(self):
        for size in range(20):
            b = paddle.reader.buffered(lambda: reader_10(0), size)
            c = 0
            for i in b():
                self.assertEqual(i, c)
                c += 1
            self.assertEqual(c, 10)

    def test_buffering(self):
        # read have 30ms delay.
        b = paddle.reader.buffered(lambda: reader_10(0.03), 10)
        last_time = time.time()
        for idx, i in enumerate(b()):
            elapsed_time = time.time() - last_time
            if i == 0:
                time.sleep(0.3)
            else:
                # read time should be short, meaning already buffered.
                self.assertLess(elapsed_time, 0.01)
            last_time = time.time()


if __name__ == '__main__':
    unittest.main()

python/paddle/trainer_config_helpers/layers.py

@@ -59,6 +59,7 @@ __all__ = [
     'img_cmrnorm_layer',
     'addto_layer',
     'concat_layer',
+    'seq_concat_layer',
     'lstm_step_layer',
     'recurrent_group',
     'memory',
@@ -144,6 +145,7 @@ class LayerType(object):
     CONCAT_LAYER = 'concat'
     CONCAT_PROJ_LAYER = 'concat2'
+    SEQUENCE_CONCAT_LAYER = 'seqconcat'

     LSTM_STEP_LAYER = 'lstm_step'
     GRU_STEP_LAYER = 'gru_step'
@@ -2570,6 +2572,59 @@ def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None):
         size=sz)


+@wrap_name_default("seqconcat")
+@wrap_act_default(act=IdentityActivation())
+@wrap_bias_attr_default(has_bias=False)
+@layer_support()
+def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
+                     bias_attr=None):
+    """
+    Concat sequence a with sequence b.
+
+    Inputs:
+      - a = [a1, a2, ..., an]
+      - b = [b1, b2, ..., bn]
+      - Note that the length of a and b should be the same.
+
+    Output: [a1, b1, a2, b2, ..., an, bn]
+
+    The example usage is:
+
+    ..  code-block:: python
+
+        concat = seq_concat_layer(a=layer1, b=layer2)
+
+    :param name: Layer name.
+    :type name: basestring
+    :param a: input sequence layer
+    :type a: LayerOutput
+    :param b: input sequence layer
+    :type b: LayerOutput
+    :param act: Activation type.
+    :type act: BaseActivation
+    :param layer_attr: Extra Layer Attribute.
+    :type layer_attr: ExtraLayerAttribute
+    :return: LayerOutput object.
+    :rtype: LayerOutput
+    """
+    assert isinstance(a, LayerOutput) and isinstance(b, LayerOutput)
+    assert a.size == b.size
+    Layer(
+        name=name,
+        type=LayerType.SEQUENCE_CONCAT_LAYER,
+        inputs=[a.name, b.name],
+        active_type=act.name,
+        bias=ParamAttr.to_bias(bias_attr),
+        **ExtraLayerAttribute.to_kwargs(layer_attr))
+
+    return LayerOutput(
+        name,
+        layer_type=LayerType.SEQUENCE_CONCAT_LAYER,
+        parents=[a, b],
+        activation=act,
+        size=a.size)
+
+
 def memory(name,
            size,
            is_seq=False,

python/paddle/trainer_config_helpers/tests/configs/file_list.sh

@@ -4,6 +4,7 @@ test_sequence_pooling test_lstmemory_layer test_grumemory_layer
 last_first_seq test_expand_layer test_ntm_layers test_hsigmoid
 img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers
 test_rnn_group shared_fc shared_lstm shared_gru test_cost_layers_with_weight
-test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops)
+test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
+test_seq_concat)

 export whole_configs=(test_split_datasource)

python/paddle/trainer_config_helpers/tests/configs/protostr/test_seq_concat.protostr (new file, mode 100644)

type: "nn"
layers {
  name: "data1"
  type: "data"
  size: 30
  active_type: ""
}
layers {
  name: "data2"
  type: "data"
  size: 30
  active_type: ""
}
layers {
  name: "__seqconcat_0__"
  type: "seqconcat"
  size: 30
  active_type: ""
  inputs {
    input_layer_name: "data1"
  }
  inputs {
    input_layer_name: "data2"
  }
}
input_layer_names: "data1"
input_layer_names: "data2"
output_layer_names: "__seqconcat_0__"
sub_models {
  name: "root"
  layer_names: "data1"
  layer_names: "data2"
  layer_names: "__seqconcat_0__"
  input_layer_names: "data1"
  input_layer_names: "data2"
  output_layer_names: "__seqconcat_0__"
  is_recurrent_layer_group: false
}

python/paddle/trainer_config_helpers/tests/configs/test_seq_concat.py (new file, mode 100644)

from paddle.trainer_config_helpers import *

settings(batch_size=1000, learning_rate=1e-5)

din1 = data_layer(name='data1', size=30)
din2 = data_layer(name='data2', size=30)

outputs(seq_concat_layer(a=din1, b=din2))