PaddlePaddle / PaddleDetection

Commit 23ac8459
Authored Aug 08, 2017 by dongzhihong

Merge remote-tracking branch 'origin/develop' into random_op

Parents: 555af4d0 6540701f
Showing 21 changed files with 638 additions and 35 deletions (+638 -35)
Changed files:

  .travis.yml                                                        (+1 -1)
  doc/api/v2/config/layer.rst                                        (+5 -0)
  doc/templates/conf.py.cn.in                                        (+3 -7)
  doc/templates/conf.py.en.in                                        (+3 -7)
  paddle/framework/CMakeLists.txt                                    (+11 -10)
  paddle/framework/pybind.cc                                         (+2 -0)
  paddle/gserver/layers/KmaxSeqScoreLayer.cpp                        (+117 -0)
  paddle/gserver/tests/CMakeLists.txt                                (+10 -0)
  paddle/gserver/tests/test_KmaxSeqScore.cpp                         (+160 -0)
  paddle/operators/CMakeLists.txt                                    (+2 -0)
  paddle/operators/uniform_random_op.cc                              (+84 -0)
  paddle/operators/uniform_random_op.cu                              (+70 -0)
  paddle/scripts/travis/build_doc.sh                                 (+2 -7)
  proto/CMakeLists.txt                                               (+1 -1)
  python/paddle/trainer/config_parser.py                             (+10 -0)
  python/paddle/trainer_config_helpers/layers.py                     (+43 -1)
  python/paddle/trainer_config_helpers/tests/configs/file_list.sh    (+1 -1)
  python/paddle/trainer_config_helpers/tests/configs/protostr/test_kmax_seq_socre_layer.protostr  (+66 -0)
  python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py  (+11 -0)
  python/paddle/v2/framework/tests/CMakeLists.txt                    (+1 -0)
  python/paddle/v2/framework/tests/test_uniform_random_op.py         (+35 -0)
.travis.yml

@@ -38,7 +38,7 @@ before_install:
   # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
   # protobuf version.
   - pip install numpy wheel 'protobuf==3.1' sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit requests==2.9.2 LinkChecker
-  - pip install rarfile
+  - pip install rarfile nltk==3.2.2 scipy==0.19.0 recordio matplotlib Pillow
   - curl https://glide.sh/get | bash
   - eval "$(GIMME_GO_VERSION=1.8.3 gimme)"
   - go get -u github.com/alecthomas/gometalinter
doc/api/v2/config/layer.rst

@@ -257,6 +257,11 @@ seq_concat
 .. autoclass:: paddle.v2.layer.seq_concat
   :noindex:
 
+kmax_sequence_score
+-------------------
+.. autoclass:: paddle.v2.layer.kmax_sequence_score
+  :noindex:
+
 sub_nested_seq
 --------------
 .. autoclass:: paddle.v2.layer.sub_nested_seq
doc/templates/conf.py.cn.in

@@ -13,15 +13,11 @@
 # serve to show the default.
 import sys
 import os, subprocess
+sys.path.insert(0, os.path.abspath('@PROJ_ROOT@/python'))
 import shlex
 from recommonmark import parser, transform
-try:
-    import paddle
-    import py_paddle
-    import paddle.v2
-except ImportError:
-    print("Must install paddle python package before generating documentation")
-    sys.exit(1)
+import paddle
+import paddle.v2
 MarkdownParser = parser.CommonMarkParser
 AutoStructify = transform.AutoStructify
doc/templates/conf.py.en.in

@@ -13,15 +13,11 @@
 # serve to show the default.
 import sys
 import os, subprocess
+sys.path.insert(0, os.path.abspath('@PROJ_ROOT@/python'))
 import shlex
 from recommonmark import parser, transform
-try:
-    import paddle
-    import py_paddle
-    import paddle.v2
-except ImportError:
-    print("Must install paddle python package before generating documentation")
-    sys.exit(1)
+import paddle
+import paddle.v2
 MarkdownParser = parser.CommonMarkParser
paddle/framework/CMakeLists.txt

@@ -38,14 +38,15 @@ cc_test(backward_test SRCS backward_test.cc DEPS backward)
 if(WITH_PYTHON)
     cc_library(paddle_pybind SHARED
         SRCS pybind.cc
         DEPS pybind python backward
             fc_op
             sgd_op
             add_op
             mean_op
             cross_entropy_op
             gaussian_random_op
             recurrent_op
+            uniform_random_op
             fill_zeros_like_op)
 endif(WITH_PYTHON)
paddle/framework/pybind.cc

@@ -43,6 +43,8 @@ USE_OP(rowwise_add);
 USE_OP(fill_zeros_like);
 USE_OP_WITHOUT_KERNEL(recurrent_op);
 USE_OP(gaussian_random);
+USE_OP(uniform_random);
+
 namespace paddle {
 namespace framework {
 template <typename ClassType>
paddle/gserver/layers/KmaxSeqScoreLayer.cpp  (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "Layer.h"

namespace paddle {

class KmaxSeqScoreLayer : public Layer {
private:
  MatrixPtr scores_;
  size_t beamSize_;
  void kmaxScorePerSeq(const real* score,
                       real* sortedRes,
                       const ICpuGpuVectorPtr seqStartPos);

public:
  explicit KmaxSeqScoreLayer(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback = nullptr) override;
};

REGISTER_LAYER(kmax_seq_score, KmaxSeqScoreLayer);

bool KmaxSeqScoreLayer::init(const LayerMap& layerMap,
                             const ParameterMap& parameterMap) {
  bool ret = Layer::init(layerMap, parameterMap);
  CHECK_EQ(1U, inputLayers_.size());

  beamSize_ = config_.beam_size();
  CHECK_GE(beamSize_, 1U);

  setNeedSequenceInfo(false);
  setNeedGradient(false);
  return ret;
}

void KmaxSeqScoreLayer::kmaxScorePerSeq(const real* scores,
                                        real* sortedIds,
                                        const ICpuGpuVectorPtr seqStartPos) {
  int* starts = seqStartPos->getMutableData(false);
  std::vector<real> indices;
  for (size_t i = 0; i < seqStartPos->getSize() - 1; ++i) {
    int seqLen = starts[i + 1] - starts[i];
    int k = std::min(static_cast<int>(beamSize_), seqLen);

    indices.resize(seqLen, 0);
    std::iota(begin(indices), end(indices), 0.);
    std::vector<real> tmpScore(scores + starts[i], scores + starts[i + 1]);
    std::partial_sort(
        begin(indices),
        begin(indices) + k,
        end(indices),
        [&](size_t a, size_t b) { return tmpScore[a] > tmpScore[b]; });
    memcpy(sortedIds + (i * beamSize_), indices.data(), k * sizeof(real));
  }
}

void KmaxSeqScoreLayer::forward(PassType passType) {
  Layer::forward(passType);

  const Argument& input = getInput(0);
  const MatrixPtr inputScore = getInputValue(0);

  CHECK(input.hasSeq() || input.hasSubseq())
      << "input of " << getName()
      << " must be a sequence or a nested sequence.";
  CHECK_EQ(input.value->getWidth(), 1UL)
      << "input of " << getName()
      << " is score over a sequence or a nested sequence, so its width "
      << " must be 1.";

  if (useGpu_) {
    // this Layer runs only in CPU, if the model is runing on GPU,
    // then copy the input to this layer from GPU to CPU.
    Matrix::resizeOrCreate(scores_,
                           inputScore->getHeight(),
                           1,
                           false /* trans */,
                           false /* useGpu */);
    scores_->copyFrom(*inputScore);
  } else {
    scores_ = inputScore;
  }

  Matrix::resizeOrCreate(
      output_.value,
      input.hasSubseq() ? input.getNumSubSequences() : input.getNumSequences(),
      beamSize_,
      false,
      false);
  output_.value->one();
  output_.value->mulScalar(-1.);

  kmaxScorePerSeq(scores_->getData(),
                  output_.value->getData(),
                  input.hasSubseq() ? input.subSequenceStartPositions
                                    : input.sequenceStartPositions);
}

void KmaxSeqScoreLayer::backward(const UpdateCallback& callback) {}

}  // namespace paddle
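For readers skimming the layer, here is a minimal NumPy sketch of the per-sequence top-k selection that kmaxScorePerSeq performs; it is not part of the commit and not a Paddle API, and the helper name kmax_indices_per_seq is illustrative only. For each sequence it returns the indices of the beam_size highest scores, leaving unused slots at -1, matching the layer's output that is initialized to -1.

import numpy as np

def kmax_indices_per_seq(scores, seq_start_pos, beam_size):
    # scores: flat array of per-position scores; seq_start_pos: sequence offsets,
    # e.g. [0, 3, 5] describes two sequences of lengths 3 and 2.
    num_seqs = len(seq_start_pos) - 1
    out = -np.ones((num_seqs, beam_size))          # unused slots stay at -1
    for i in range(num_seqs):
        lo, hi = seq_start_pos[i], seq_start_pos[i + 1]
        k = min(beam_size, hi - lo)
        # indices (local to the sequence) of the k largest scores
        out[i, :k] = np.argsort(-scores[lo:hi])[:k]
    return out

# prints the positions of the two best scores in each sequence: [[1, 2], [0, 1]]
print(kmax_indices_per_seq(np.array([0.1, 0.9, 0.3, 0.8, 0.2]), [0, 3, 5], 2))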
paddle/gserver/tests/CMakeLists.txt

@@ -66,6 +66,16 @@ add_unittest_without_exec(test_BatchNorm
 add_test(NAME test_BatchNorm
          COMMAND test_BatchNorm)
 
+################# test_KmaxSeqScore #######################
+add_unittest_without_exec(test_KmaxSeqScore
+    test_KmaxSeqScore.cpp
+    LayerGradUtil.cpp)
+
+add_test(NAME test_KmaxSeqScore
+         COMMAND test_KmaxSeqScore)
+
+
 ################## test_Evaluator #######################
 add_unittest(test_Evaluator
              test_Evaluator.cpp)
paddle/gserver/tests/test_KmaxSeqScore.cpp  (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include <algorithm>
#include <string>
#include <vector>
#include "ModelConfig.pb.h"
#include "paddle/gserver/layers/DataLayer.h"
#include "paddle/trainer/Trainer.h"
#include "paddle/utils/GlobalConstants.h"

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"

using namespace paddle;  // NOLINT
using namespace std;     // NOLINT

DECLARE_bool(use_gpu);
DECLARE_int32(gpu_id);
DECLARE_bool(thread_local_rand_use_global_seed);

vector<int> randSampling(int range, int n) {
  CHECK_GE(range, n);
  vector<int> num(range);
  iota(begin(num), end(num), 0);
  if (range == n) return num;

  random_shuffle(begin(num), end(num));
  num.resize(n);
  return num;
}

void genRandomSeqInfo(vector<int>& seqStartPosition,
                      vector<int>& subSeqStartPosition) {
  const int maxSeqNum = 100;
  // generate random start position information
  int seqNum = 1 + (rand() % maxSeqNum);
  seqStartPosition.resize(seqNum + 1, 0);
  subSeqStartPosition.resize(1, 0);

  for (int i = 0; i < seqNum; ++i) {
    int subSeqLen = 1 + (rand() % maxSeqNum);
    for (int j = 0; j < subSeqLen; ++j)
      subSeqStartPosition.push_back(subSeqStartPosition.back() + subSeqLen);
    seqStartPosition[i + 1] = subSeqStartPosition.back();
  }
}

void genRandomGroundTruth(real* values,
                          vector<vector<int>>& groundTruth,
                          vector<int>& startPos,
                          size_t beamSize) {
  groundTruth.resize(startPos.size() - 1, vector<int>(beamSize, -1));
  for (size_t i = 0; i < startPos.size() - 1; ++i) {
    int seqLen = startPos[i + 1] - startPos[i];
    vector<int> pos =
        randSampling(seqLen, min(static_cast<int>(beamSize), seqLen));
    for (size_t j = 0; j < pos.size(); ++j) {
      groundTruth[i][j] = pos[j];
      values[startPos[i] + pos[j]] = 1.;
    }
  }
}

void checkLayerOut(vector<vector<int>> groundTruth,
                   real* layerOut,
                   size_t beamSize) {
  for (size_t i = 0; i < groundTruth.size(); ++i) {
    int begPos = i * beamSize;
    vector<real> tmp(layerOut + begPos, layerOut + begPos + beamSize);
    sort(begin(tmp), end(tmp));
    sort(begin(groundTruth[i]), end(groundTruth[i]));
    for (size_t j = 0; j < beamSize; ++j) CHECK_EQ(tmp[j], groundTruth[i][j]);
  }
}

TEST(Layer, kmaxSeqScoreLayer) {
  const size_t maxBeamSize = 100;
  int beamSize = 1 + (rand() % maxBeamSize);

  vector<int> seqStartPosition;
  vector<int> subSeqStartPosition;
  genRandomSeqInfo(seqStartPosition, subSeqStartPosition);
  MatrixPtr inValue =
      Matrix::create(subSeqStartPosition.back(), 1, false, false);

  for (auto hasSubseq : {false, true}) {
    vector<vector<int>> groundTruth;
    inValue->randomizeUniform();
    genRandomGroundTruth(inValue->getData(),
                         groundTruth,
                         hasSubseq ? subSeqStartPosition : seqStartPosition,
                         beamSize);

    for (auto useGpu : {false, true}) {
      TestConfig config;
      config.layerConfig.set_type("kmax_seq_score");
      config.layerConfig.set_beam_size(beamSize);

      if (hasSubseq) {
        config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA,
                                    "scores",
                                    inValue,
                                    seqStartPosition,
                                    subSeqStartPosition});
      } else {
        config.inputDefs.push_back(
            {INPUT_SELF_DEFINE_DATA, "scores", inValue, seqStartPosition});
      }
      config.layerConfig.add_inputs();

      // data layer initialize
      std::vector<DataLayerPtr> dataLayers;
      LayerMap layerMap;
      vector<Argument> datas;
      initDataLayer(
          config,
          &dataLayers,
          &datas,
          &layerMap,
          "kmax_seq_score",
          100 /* actually this parameter is unused in self-defined input*/,
          false,
          useGpu);
      // test layer initialize
      std::vector<ParameterPtr> parameters;
      LayerPtr kmaxSeqScoreLayer;
      FLAGS_use_gpu = useGpu;
      initTestLayer(config, &layerMap, &parameters, &kmaxSeqScoreLayer);
      kmaxSeqScoreLayer->forward(PASS_TRAIN);

      const MatrixPtr outValue = kmaxSeqScoreLayer->getOutputValue();
      CHECK_EQ(outValue->getHeight(),
               hasSubseq ? subSeqStartPosition.size() - 1
                         : seqStartPosition.size() - 1);
      CHECK_EQ(outValue->getWidth(), beamSize);
      checkLayerOut(groundTruth, outValue->getData(), beamSize);
    }
  }
}

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
  FLAGS_thread_local_rand_use_global_seed = true;
  srand((size_t)(time(NULL)));
  return RUN_ALL_TESTS();
}
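The test builds its ground truth by planting known winners: scores are drawn strictly below 1.0 and then 1.0 is written at beam_size sampled positions per sequence, so the expected top-k index set is known in advance. A short NumPy sketch of that trick (illustrative only, not the test code; names are assumptions):

import numpy as np

rng = np.random.default_rng(0)
seq_len, beam_size = 8, 3
scores = rng.uniform(-1.0, 1.0, size=seq_len)            # everything strictly < 1.0
planted = rng.choice(seq_len, size=beam_size, replace=False)
scores[planted] = 1.0                                     # planted positions now hold the maxima
top_k = np.argsort(-scores)[:beam_size]
assert set(top_k) == set(planted)                         # the layer should recover exactly these indices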
paddle/operators/CMakeLists.txt

@@ -67,3 +67,5 @@ op_library(fc_op
 op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc
     DEPS op_desc tensor op_registry operator net_op)
 cc_test(recurrent_op_test SRCS recurrent_op_test.cc DEPS recurrent_op gtest mul_op add_op)
+op_library(uniform_random_op
+           SRCS uniform_random_op.cc uniform_random_op.cu)
paddle/operators/uniform_random_op.cc  (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <random>
#include <type_traits>
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"

namespace paddle {
namespace operators {

// It seems that Eigen::Tensor::random in GPU will SEGFAULT.
// Use std::random and thrust::random(thrust is a std library in CUDA) to
// implement uniform random.
template <typename T>
class CPUUniformRandomKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* tensor = context.Output<framework::Tensor>(0);
    T* data = tensor->mutable_data<T>(context.GetPlace());
    unsigned int seed =
        static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
    std::minstd_rand engine;
    if (seed == 0) {
      seed = std::random_device()();
    }
    engine.seed(seed);
    std::uniform_real_distribution<T> dist(
        static_cast<T>(context.op_.GetAttr<float>("min")),
        static_cast<T>(context.op_.GetAttr<float>("max")));
    for (ssize_t i = 0; i < framework::product(tensor->dims()); ++i) {
      data[i] = dist(engine);
    }
  }
};

class UniformRandomOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext& ctx) const override {
    PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"),
                   "uniform_random's min must less then max");
    auto* tensor = ctx.Output<framework::Tensor>(0);
    auto dims = GetAttr<std::vector<int>>("dims");
    tensor->Resize(framework::make_ddim(dims));
  }
};

class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  UniformRandomOpMaker(framework::OpProto* proto,
                       framework::OpAttrChecker* op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddOutput("Out", "The output tensor of uniform random op");
    AddComment(R"DOC(Uniform random operator.

Used to initialize tensor with uniform random generator.
)DOC");
    AddAttr<std::vector<int>>("dims", "the dimension of random tensor");
    AddAttr<float>("min", "Minimum value of uniform random").SetDefault(-1.0f);
    AddAttr<float>("max", "Maximun value of uniform random").SetDefault(1.0f);
    AddAttr<int>("seed",
                 "Random seed of uniform random. "
                 "0 means generate a seed by system")
        .SetDefault(0);
  }
};
}  // namespace operators
}  // namespace paddle

REGISTER_OP(uniform_random,
            paddle::operators::UniformRandomOp,
            paddle::operators::UniformRandomOpMaker);
REGISTER_OP_CPU_KERNEL(uniform_random,
                       paddle::operators::CPUUniformRandomKernel<float>);
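As a rough host-side mental model of the CPU kernel's attribute handling, the sketch below mimics it in NumPy; uniform_random here is an illustrative helper, not a Paddle API. A seed of 0 means "let the system pick a seed"; any other value makes the output reproducible for fixed min, max, and dims.

import numpy as np

def uniform_random(dims, low=-1.0, high=1.0, seed=0):
    # seed == 0 mirrors the op's convention of drawing a system-chosen seed
    rng = np.random.default_rng(None if seed == 0 else seed)
    return rng.uniform(low, high, size=dims)

t = uniform_random([1000, 784], low=-5.0, high=10.0, seed=10)
print(t.mean())   # close to (low + high) / 2 = 2.5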
paddle/operators/uniform_random_op.cu  (new file, mode 100644)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"

namespace paddle {
namespace operators {

template <typename T>
struct UniformGenerator {
  T min_, max_;
  unsigned int seed_;

  __host__ __device__ UniformGenerator(T min, T max, int seed)
      : min_(min), max_(max), seed_(seed) {}

  __host__ __device__ T operator()(const unsigned int n) const {
    thrust::minstd_rand rng;
    rng.seed(seed_);
    thrust::uniform_real_distribution<T> dist(min_, max_);
    rng.discard(n);
    return dist(rng);
  }
};

// It seems that Eigen::Tensor::random in GPU will SEGFAULT.
// Use std::random and thrust::random(thrust is a std library in CUDA) to
// implement uniform random.
template <typename T>
class GPUUniformRandomKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* tensor = context.Output<framework::Tensor>(0);
    T* data = tensor->mutable_data<T>(context.GetPlace());
    unsigned int seed =
        static_cast<unsigned int>(context.op_.GetAttr<int>("seed"));
    if (seed == 0) {
      seed = std::random_device()();
    }
    T min = static_cast<T>(context.op_.GetAttr<float>("min"));
    T max = static_cast<T>(context.op_.GetAttr<float>("max"));
    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
    ssize_t N = framework::product(tensor->dims());
    thrust::transform(index_sequence_begin,
                      index_sequence_begin + N,
                      thrust::device_ptr<T>(data),
                      UniformGenerator<T>(min, max, seed));
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OP_GPU_KERNEL(uniform_random,
                       paddle::operators::GPUUniformRandomKernel<float>);
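The GPU kernel pairs a counting iterator with a per-element functor: each element index n re-seeds a lightweight generator and discards n draws, so every value depends only on (seed, n) and threads can fill the tensor in any order. A pure-Python sketch of that idea (illustrative only, quadratic in cost unlike the device version; the helper name element_value is an assumption):

import random

def element_value(n, seed, low, high):
    rng = random.Random(seed)
    for _ in range(n):            # plays the role of rng.discard(n)
        rng.random()
    return low + (high - low) * rng.random()

# the same (seed, n) always yields the same value, regardless of evaluation order
values = [element_value(n, seed=10, low=-5.0, high=10.0) for n in range(6)]
print(values)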
paddle/scripts/travis/build_doc.sh

@@ -5,15 +5,9 @@ set -e
 mkdir -p $TRAVIS_BUILD_DIR/build
 cd $TRAVIS_BUILD_DIR/build
 
-# Compile paddle binaries first
-cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_GOLANG=ON -DWITH_STYLE_CHECK=OFF
-
-mkdir output
-make -j `nproc`
-find .. -name '*whl' | xargs pip install # install all wheels.
-rm -rf *
 # Compile Documentation only.
 cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON
+make -j `nproc` gen_proto_py
 make -j `nproc` paddle_docs paddle_docs_cn
 
 # check websites for broken links
@@ -35,6 +29,7 @@ TARGET_BRANCH="gh-pages"
 SOURCE_BRANCH="master"
 
 # Clone the repo to output directory
+mkdir output
 git clone $REPO output
 cd output
proto/CMakeLists.txt

@@ -17,7 +17,7 @@ foreach(filename ${proto_filenames})
         COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
         ARGS "--python_out=${PROJ_ROOT}/python/paddle/proto"
              "-I" ${CMAKE_CURRENT_SOURCE_DIR} ${ABS_FIL}
-        DEPENDS ${ABS_FIL} ${external_project_dependencies})
+        DEPENDS ${ABS_FIL} protoc)
 endforeach()
 
 add_custom_target(gen_proto_py ALL DEPENDS ${PROTO_GEN_PY})
python/paddle/trainer/config_parser.py

@@ -3248,6 +3248,16 @@ class CTCLayer(LayerBase):
         config_assert(len(self.inputs) == 2, 'CTCLayer must have 2 inputs')
 
 
+@config_layer('kmax_seq_score')
+class KmaxSeqScoreLayer(LayerBase):
+    def __init__(self, name, inputs, beam_size, **xargs):
+        super(KmaxSeqScoreLayer, self).__init__(
+            name, 'kmax_seq_score', 0, inputs=inputs, **xargs)
+        config_assert(
+            len(self.inputs) == 1, 'KmaxSeqScoreLayer has only one input.')
+        self.config.beam_size = beam_size
+
+
 @config_layer('warp_ctc')
 class WarpCTCLayer(LayerBase):
     def __init__(self,
python/paddle/trainer_config_helpers/layers.py

@@ -132,6 +132,7 @@ __all__ = [
     'sub_nested_seq_layer',
     'clip_layer',
     'slice_projection',
+    'kmax_sequence_score_layer',
 ]
 
@@ -228,6 +229,8 @@ class LayerType(object):
     SUB_NESTED_SEQ = 'sub_nested_seq'
     CLIP_LAYER = 'clip'
+    KMAX_SEQ_SCORE = 'kmax_seq_score'
+
     @staticmethod
     def is_layer_type(type_name):
         """
@@ -6158,7 +6161,8 @@ def clip_layer(input, min, max, name=None):
     :type min: double
     :param max: The upper threshold for clipping.
     :type max: double
-    :return: LayerOutput
+    :return: LayerOutput object.
+    :rtype: LayerOutput
     """
     Layer(
         name=name,
@@ -6168,3 +6172,41 @@ def clip_layer(input, min, max, name=None):
         max=max)
     return LayerOutput(
         name, LayerType.CLIP_LAYER, parents=[input], size=input.size)
+
+
+@wrap_name_default()
+@layer_support()
+def kmax_sequence_score_layer(input, name=None, beam_size=1):
+    """
+    This layer accepts one input which are scores over a sequence or a nested
+    sequence, and returns indices of beam_size sequences with highest scores.
+
+    .. code-block:: python
+
+        kmax_indices = kmax_sequence_score_layer(input=input_layer, beam_size)
+
+
+    :param name: The Layer Name.
+    :type name: basestring
+    :param input: The input layer. It stores scores over a sequence or a nested
+        sequence and its size must be 1.
+    :type input: LayerOutput.
+    :param beam_size: squence indices with top beam_size scores are returned.
+    :type beam_size: double
+    :return: LayerOutput object.
+    :rtype: LayerOutput
+    """
+    assert isinstance(input, LayerOutput), ("kmax_sequence_score_layer "
+                                            "accepts only one input.")
+    assert input.size == 1, (
+        "input of kmax_sequence_score_layer is a score"
+        "over a sequence or a nested sequence, so its width must be 1.")
+
+    Layer(
+        name=name,
+        type=LayerType.KMAX_SEQ_SCORE,
+        inputs=[input.name],
+        beam_size=beam_size)
+
+    return LayerOutput(
+        name, LayerType.KMAX_SEQ_SCORE, parents=[input], size=input.size)
python/paddle/trainer_config_helpers/tests/configs/file_list.sh

@@ -8,6 +8,6 @@ test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops
 test_seq_concat_reshape test_pad test_smooth_l1 test_multiplex_layer
 test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_layer
 test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer
-test_seq_select_layers)
+test_kmax_seq_socre_layer test_seq_select_layers)
 
 export whole_configs=(test_split_datasource)
python/paddle/trainer_config_helpers/tests/configs/protostr/test_kmax_seq_socre_layer.protostr  (new file, mode 100644)

type: "nn"
layers {
name: "input"
type: "data"
size: 300
active_type: ""
}
layers {
name: "data"
type: "data"
size: 128
active_type: ""
}
layers {
name: "__fc_layer_0__"
type: "fc"
size: 1
active_type: "exponential"
inputs {
input_layer_name: "data"
input_parameter_name: "___fc_layer_0__.w0"
}
bias_parameter_name: "___fc_layer_0__.wbias"
}
layers {
name: "__kmax_sequence_score_layer_0__"
type: "kmax_seq_score"
active_type: ""
inputs {
input_layer_name: "__fc_layer_0__"
}
beam_size: 5
}
parameters {
name: "___fc_layer_0__.w0"
size: 128
initial_mean: 0.0
initial_std: 0.0883883476483
dims: 128
dims: 1
initial_strategy: 0
initial_smart: true
}
parameters {
name: "___fc_layer_0__.wbias"
size: 1
initial_mean: 0.0
initial_std: 0.0
dims: 1
dims: 1
initial_strategy: 0
initial_smart: false
}
input_layer_names: "data"
output_layer_names: "__kmax_sequence_score_layer_0__"
sub_models {
name: "root"
layer_names: "input"
layer_names: "data"
layer_names: "__fc_layer_0__"
layer_names: "__kmax_sequence_score_layer_0__"
input_layer_names: "data"
output_layer_names: "__kmax_sequence_score_layer_0__"
is_recurrent_layer_group: false
}
python/paddle/trainer_config_helpers/tests/configs/test_kmax_seq_socre_layer.py  (new file, mode 100644)

#!/usr/bin/env python
#coding=utf-8
from paddle.trainer_config_helpers import *

data = data_layer(name='input', size=300)

data = data_layer(name="data", size=128)
scores = fc_layer(input=data, size=1, act=ExpActivation())
kmax_seq_id = kmax_sequence_score_layer(input=scores, beam_size=5)

outputs(kmax_seq_id)
python/paddle/v2/framework/tests/CMakeLists.txt

@@ -25,3 +25,4 @@ py_test(test_op_creation_methods SRCS test_op_creation_methods.py)
 py_test(test_operator SRCS test_operator.py)
 py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py)
+py_test(test_uniform_random_op SRCS test_uniform_random_op.py)
python/paddle/v2/framework/tests/test_uniform_random_op.py  (new file, mode 100644)

import unittest
from paddle.v2.framework.op import Operator
import paddle.v2.framework.core as core
import numpy


class UniformRandomTest(unittest.TestCase):
    def test_uniform_random_cpu(self):
        self.uniform_random_test(place=core.CPUPlace())

    def test_uniform_random_gpu(self):
        if core.is_compile_gpu():
            self.uniform_random_test(place=core.GPUPlace(0))

    def uniform_random_test(self, place):
        scope = core.Scope()
        scope.new_var("X").get_tensor()

        op = Operator(
            "uniform_random",
            Out="X",
            dims=[1000, 784],
            min=-5.0,
            max=10.0,
            seed=10)

        op.infer_shape(scope)
        ctx = core.DeviceContext.create(place)
        op.run(scope, ctx)
        tensor = numpy.array(scope.find_var("X").get_tensor())
        self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)


if __name__ == '__main__':
    unittest.main()
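The asserted mean follows directly from the attributes: a uniform distribution on [min, max] has expected value (min + max) / 2, here (-5.0 + 10.0) / 2 = 2.5, and with 1000 x 784 samples the empirical mean comfortably falls within the delta=0.1 tolerance.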