Commit f8e8d1ad

Merge pull request #846 from reyoung/feature/remove_m4

Remove m4 when generate protobuf

Authored by Yu Yang on Dec 13, 2016; committed via GitHub on Dec 13, 2016.
Parents: b9935859, f62f5181
Showing 11 changed files with 89 additions and 102 deletions (+89, -102).
paddle/gserver/layers/MultinomialSampler.h    +12  -1
paddle/gserver/layers/NCELayer.cpp             +2  -2
paddle/gserver/layers/WarpCTCLayer.cpp         +0  -1
paddle/pserver/ParameterClient2.cpp           +16 -17
proto/CMakeLists.txt                           +4 -26
proto/DataConfig.proto                         +6  -6
proto/DataFormat.proto                         +1  -1
proto/ModelConfig.proto                       +18 -18
proto/ParameterConfig.proto                    +8  -8
proto/ParameterService.proto                   +6  -6
proto/TrainerConfig.proto                     +16 -16
paddle/gserver/layers/MultinomialSampler.h

@@ -14,8 +14,8 @@ limitations under the License. */

 #pragma once

+#include <memory>
 #include <random>

 #include "paddle/utils/TypeDefs.h"

 namespace paddle {

@@ -32,6 +32,17 @@ class MultinomialSampler {
 public:
   MultinomialSampler(const real* prob, int size);

+  //! protobuf always using double.
+  static MultinomialSampler* create(const double* prob, int size) {
+#ifdef PADDLE_TYPE_DOUBLE
+    return new MultinomialSampler(prob, size);
+#else
+    std::unique_ptr<real[]> tmp(new real[size]);
+    std::copy(prob, prob + size, tmp.get());
+    return new MultinomialSampler(tmp.get(), size);
+#endif
+  }
+
   /**
    * @brief Generate a random sample.
    * @param g is a random number engine. See <random>.
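Aside: the create() factory above exists because the protobuf fields in this commit become double on the wire, while Paddle's `real` may be compiled as float. A minimal, self-contained sketch of the same narrowing-copy pattern (the `real` alias and the sample data are stand-ins, not Paddle code):

#include <algorithm>
#include <memory>
#include <vector>

using real = float;  // stand-in for Paddle's compile-time `real` typedef

int main() {
  std::vector<double> prob = {0.1, 0.2, 0.7};  // e.g. values read from protobuf
  // Copy the double array into a temporary `real` buffer so an existing
  // const real* consumer (here, the MultinomialSampler constructor) keeps working.
  std::unique_ptr<real[]> tmp(new real[prob.size()]);
  std::copy(prob.begin(), prob.end(), tmp.get());  // per-element double -> real
  return 0;
}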
paddle/gserver/layers/NCELayer.cpp

@@ -99,8 +99,8 @@ public:
     if (config_.neg_sampling_dist_size()) {
       CHECK_EQ(numClasses_, config_.neg_sampling_dist_size());
-      sampler_.reset(new MultinomialSampler(config_.neg_sampling_dist().data(),
-                                            numClasses_));
+      sampler_.reset(MultinomialSampler::create(
+          config_.neg_sampling_dist().data(), numClasses_));
     }
     return true;
paddle/gserver/layers/WarpCTCLayer.cpp

@@ -31,7 +31,6 @@ bool WarpCTCLayer::init(const LayerMap& layerMap,
   CHECK_EQ(numClasses_, inputLayers_[0]->getSize());
   blank_ = config_.blank();
-  CHECK_GE(blank_, 0UL);
   CHECK_LT(blank_, numClasses_);

   normByTimes_ = config_.norm_by_times();
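Aside: a plausible reading of the dropped check is that blank_ is an unsigned type, so CHECK_GE(blank_, 0UL) could never fail. A tiny hedged sketch of the tautology (the variable is illustrative, not the Paddle member):

#include <cassert>
#include <cstddef>

int main() {
  size_t blank = 0;      // stand-in for blank_
  assert(blank >= 0UL);  // always true for unsigned; compilers warn about it
  return 0;
}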
paddle/pserver/ParameterClient2.cpp

@@ -25,24 +25,17 @@ P_DEFINE_int32(parallel_thread_num, 1, "Thread number for parameter send");

 namespace paddle {

-template <class T>
-void copyToRepeatedField(google::protobuf::RepeatedField<T>* dest,
-                         const T* src,
+template <typename T1, typename T2>
+void copyToRepeatedField(google::protobuf::RepeatedField<T1>* dest,
+                         const T2* src,
                          size_t size) {
   dest->Clear();
   dest->Reserve(size);
   for (size_t i = 0; i < size; ++i) {
     dest->AddAlreadyReserved(src[i]);
   }
 }

-template <class T>
-void copyToRepeatedField(const std::vector<T>& src,
-                         google::protobuf::RepeatedField<T>* dest) {
-  copyToRepeatedField(dest, &src[0], src.size());
-}
-
 ParameterClient2::ParameterClient2(bool separate, int port, int numPorts)
     : BaseClient(separate, numPorts), port_(port) {
 #ifndef PADDLE_DISABLE_TIMER
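Aside: the template gains a second type parameter because the destination RepeatedField is now always double while the source array is Paddle's `real`, which may be float. A self-contained sketch under those assumptions (RepeatedField is the real protobuf API; main() is illustrative):

#include <google/protobuf/repeated_field.h>

#include <cstddef>

using real = float;  // stand-in for Paddle's configurable `real`

template <typename T1, typename T2>
void copyToRepeatedField(google::protobuf::RepeatedField<T1>* dest,
                         const T2* src,
                         size_t size) {
  dest->Clear();
  dest->Reserve(size);
  for (size_t i = 0; i < size; ++i) {
    dest->AddAlreadyReserved(src[i]);  // implicit T2 -> T1 conversion
  }
}

int main() {
  google::protobuf::RepeatedField<double> field;  // double on the wire
  const real src[] = {1.0f, 2.0f, 3.0f};          // real in memory
  copyToRepeatedField(&field, src, 3);  // only compiles with the T1/T2 split
  return 0;
}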
@@ -618,6 +611,8 @@ void PreparedOperations::addOperationHelper(Operation* op, CpuMatrixPtr mat) {
                       pmat.mutable_values(),
                       mat->getData(),
                       pmat.num_cols() * pmat.num_rows());
 }

+static inline real addTwo(real a, double b) { return a + b; }
+
 void ParameterClient2::doOperation(PreparedOperations& ops,
                                    bool waitForGradient,
                                    bool sendBackGradient,

@@ -682,8 +677,11 @@ void ParameterClient2::doOperation(PreparedOperations& ops,
     CpuVectorPtr rvec = resultVectors[i];
     if (!rvec) continue;
     CHECK_EQ(rvec->getSize(), (size_t)vec.dim());
-    CpuVector avec(rvec->getSize(), const_cast<real*>(vec.values().data()));
-    rvec->add(avec);
+    std::transform(rvec->getData(),
+                   rvec->getData() + rvec->getSize(),
+                   vec.values().data(),
+                   rvec->getData(),
+                   addTwo);
   }

   CHECK_EQ(resultMatrices.size(), (size_t)result.matrices_size());

@@ -693,11 +691,12 @@ void ParameterClient2::doOperation(PreparedOperations& ops,
     if (!rmat) continue;
     CHECK_EQ(rmat->getHeight(), (size_t)mat.num_rows());
     CHECK_EQ(rmat->getWidth(), (size_t)mat.num_cols());
-    CpuMatrixPtr amat =
-        std::make_shared<CpuMatrix>(const_cast<real*>(mat.values().data()),
-                                    rmat->getHeight(),
-                                    rmat->getWidth());
-    rmat->add(*amat);
+    std::transform(rmat->getData(),
+                   rmat->getData() + rmat->getElementCnt(),
+                   mat.values().data(),
+                   rmat->getData(),
+                   addTwo);
   }
 }
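Aside: a minimal sketch of the new accumulation path, assuming the protobuf buffer is double while the local vector holds `real`: std::transform with addTwo converts per element instead of aliasing the buffer as real* (the vectors here are illustrative, not the CpuVector/ProtoVector types):

#include <algorithm>
#include <cassert>
#include <vector>

using real = float;  // stand-in for Paddle's configurable `real`

static inline real addTwo(real a, double b) { return a + b; }

int main() {
  std::vector<real> result = {1.0f, 2.0f, 3.0f};  // local result buffer
  std::vector<double> values = {0.5, 0.5, 0.5};   // proto values() payload
  // Element-wise: result[i] = addTwo(result[i], values[i]).
  std::transform(result.begin(), result.end(), values.begin(), result.begin(),
                 addTwo);
  assert(result[0] == 1.5f && result[2] == 3.5f);
  return 0;
}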
proto/CMakeLists.txt

@@ -6,25 +6,6 @@ set(proto_filenames
             ParameterService.proto
             TrainerConfig.proto)

-set(real_proto_files)
-
-# TODO(yuyang18): Some internal proto will also be depended on.
-# Find a way to automatically calculate all depends.
-foreach(filename ${proto_filenames})
-    set(PROTOBUF_3_FLAGS "")
-    if (PROTOBUF_3)
-        set(PROTOBUF_3_FLAGS "-Dproto3")
-    endif()
-    add_custom_command(OUTPUT ${filename}
-        COMMAND ${M4_EXECUTABLE} -Dreal=${ACCURACY} ${PROTOBUF_3_FLAGS} -I '${INTERNAL_PROTO_PATH}'
-                ${PROJ_ROOT}/proto/${filename}.m4 > ${filename}
-        DEPENDS ${PROJ_ROOT}/proto/${filename}.m4
-        COMMENT "Generate ${filename}")
-endforeach()
-
-add_custom_target(proto_accuracy ALL
-                  DEPENDS ${proto_filenames})
-
 set(PROTO_GEN)
 set(PROTO_GEN_PY)

@@ -39,9 +20,8 @@ foreach(filename ${proto_filenames})
     add_custom_command(OUTPUT ${CUR_PROTO_GEN}
         COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
                 --cpp_out ${CMAKE_CURRENT_BINARY_DIR}
-                --proto_path ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}/${filename}
-        DEPENDS proto_accuracy
-                ${PROJ_ROOT}/proto/${filename}.m4)
+                --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename}
+        DEPENDS ${filename})

     set(CUR_PROTO_GEN_PY
         ${PROJ_ROOT}/paddle/python/paddle/proto/${base_filename}_pb2.py)

@@ -50,9 +30,8 @@ foreach(filename ${proto_filenames})
         ${PROTO_GEN_PY}
         ${CUR_PROTO_GEN_PY})
     add_custom_command(OUTPUT ${CUR_PROTO_GEN_PY}
         COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${PROJ_ROOT}/python/paddle/proto
-                --proto_path ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}/${filename}
-        DEPENDS proto_accuracy
-                ${PROJ_ROOT}/proto/${filename}.m4)
+                --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename}
+        DEPENDS ${filename})
 endforeach()

 include_directories(${CMAKE_CURRENT_BINARY_DIR}/proto)

@@ -61,5 +40,4 @@ add_custom_target(gen_proto_cpp ALL DEPENDS ${PROTO_GEN})
 add_custom_target(gen_proto_py ALL DEPENDS ${PROTO_GEN_PY})
 add_library(paddle_proto STATIC
             ${PROTO_GEN})
-add_dependencies(paddle_proto proto_accuracy)
 target_include_directories(paddle_proto PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
proto/DataConfig.proto.m4 → proto/DataConfig.proto

@@ -11,11 +11,11 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-ifdef(`proto3', `syntax = "proto2";')
+syntax = "proto2";

 package paddle;

-sinclude(`DataConfigExt.proto.m4')
+
 message FileGroupConf {
   optional uint32 queue_capacity = 1 [default = 1];
   // how many files to load for a load file thread

@@ -26,7 +26,7 @@ message FileGroupConf {
 };

 message DataConfig {
-sinclude(`DataConfigInter.proto.m4')
+
   required string type = 1;

   // name of a text file which contains a list of file names at each line

@@ -51,11 +51,11 @@ sinclude(`DataConfigInter.proto.m4')
   /// Note the field number 17, 18 and 19 have been deprecated.

-  // a list of values which will be used to create additional one dimensional real
+  // a list of values which will be used to create additional one dimensional float
   // values slots. These one dimensional slots can be used as the weight input
   // for cost layers.
   // Currently this is only supported by ProtoDataProvider.
-  repeated real constant_slots = 20;
+  repeated double constant_slots = 20;

   // for PyDataProvider.
   // Specify the load data script module name, object name and user args

@@ -80,6 +80,6 @@ sinclude(`DataConfigInter.proto.m4')
   optional bool is_main_data = 26 [default = true];

   // the usage ratio of instances. Setting to 1.0 means the use of all instances.
-  optional real usage_ratio = 27 [default = 1.0];
+  optional double usage_ratio = 27 [default = 1.0];
 };
proto/DataFormat.proto.m4 → proto/DataFormat.proto

@@ -11,7 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-ifdef(`proto3', `syntax = "proto2";')
+syntax = "proto2";

 package paddle;
proto/ModelConfig.proto.m4 → proto/ModelConfig.proto

@@ -11,7 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-ifdef(`proto3', `syntax = "proto2";')
+syntax = "proto2";

 import "ParameterConfig.proto";

@@ -20,7 +20,7 @@ package paddle;
 /**
  * Various structs for the configuration of a neural network
  */
-sinclude(`ModelConfigExt.proto.m4')
+
 message ExternalConfig {
   repeated string layer_names = 1;

@@ -146,8 +146,8 @@ message NormConfig {
   // the parameters for normalization
   // u = u / (1+scale*sum(u^2 in window))^pow
-  required real scale = 4;
-  required real pow = 5;
+  required double scale = 4;
+  required double pow = 5;

   // The size of output feature map.
   required uint32 output_x = 6;

@@ -223,7 +223,7 @@ message OperatorConfig {
   required uint64 output_size = 4;

   // For DotMulOperator
-  optional real dotmul_scale = 5 [default = 1.0];
+  optional double dotmul_scale = 5 [default = 1.0];

   // For ConvOperator
   optional ConvConfig conv_conf = 6;

@@ -266,7 +266,7 @@ message LayerInputConfig {
 }

 message LayerConfig {
-sinclude(`ModelConfigLayer.proto.m4')
+
   required string name = 1;
   required string type = 2;
   optional uint64 size = 3;

@@ -293,7 +293,7 @@ sinclude(`ModelConfigLayer.proto.m4')
   optional uint32 partial_sum = 9;

   // for dropout
-  optional real drop_rate = 10;
+  optional double drop_rate = 10;

   // for HierarchicalSoftmaxLayer and NCELayer
   // the number of classes

@@ -317,17 +317,17 @@ sinclude(`ModelConfigLayer.proto.m4')
   // For NCELayer
   // The distribution for generating the random negative labels.
   // A uniform distribution will be used if not provided
-  repeated real neg_sampling_dist = 17 [packed = true];
+  repeated double neg_sampling_dist = 17 [packed = true];

   // For MaxLayer
   // default: output VALUE of MaxLayer. set this flag to true for output INDEX
-  // INDEX will be put in Argument::value as real values.
+  // INDEX will be put in Argument::value as double values.
   optional bool output_max_index = 19 [default = false];

   /// The filed number 20 have been deprecated.

   // For self-normalized estimation
-  optional real softmax_selfnorm_alpha = 21 [default = 0.1];
+  optional double softmax_selfnorm_alpha = 21 [default = 0.1];

   /// The filed numbers 22 and 23 have been deprecated.

@@ -338,14 +338,14 @@ sinclude(`ModelConfigLayer.proto.m4')
   optional bool norm_by_times = 25;

   // for CostLayers
-  optional real coeff = 26 [default = 1.0];
+  optional double coeff = 26 [default = 1.0];

   // for AverageLayer
   // can be set to: 'average', 'sum' or 'squarerootn'
   optional string average_strategy = 27;

   // for error clipping
-  optional real error_clipping_threshold = 28 [default = 0.0];
+  optional double error_clipping_threshold = 28 [default = 0.0];

   // for operators used by mixed layer
   repeated OperatorConfig operator_confs = 29;

@@ -355,11 +355,11 @@ sinclude(`ModelConfigLayer.proto.m4')
   optional int32 max_sort_size = 31;

   // for SlopeInterceptLayer
-  optional real slope = 32;
-  optional real intercept = 33;
+  optional double slope = 32;
+  optional double intercept = 33;

   // for CosSimVecMatLayer and CosSimLayer
-  optional real cos_scale = 34;
+  optional double cos_scale = 34;

   // for DataNormLayer
   // can be set to: 'z-score', 'min-max' or 'decimal-scaling'

@@ -394,7 +394,7 @@ sinclude(`ModelConfigLayer.proto.m4')
   // if number of the selected columns is less than
   // sample number * selective_fc output size * selective_fc_mull_mull_ratio
   // sparse multiplication is used, otherwise, using full multiplication.
-  optional real selective_fc_full_mul_ratio = 44 [default = 0.02];
+  optional double selective_fc_full_mul_ratio = 44 [default = 0.02];

   // to indicate how many threads selective_fc use to to accelate
   // the plain_mul period

@@ -406,7 +406,7 @@ sinclude(`ModelConfigLayer.proto.m4')
   optional bool use_global_stats = 46;

   // use to compute moving mean and variance.
-  optional real moving_average_fraction = 47 [default = 0.9];
+  optional double moving_average_fraction = 47 [default = 0.9];

   // bias size
   optional uint32 bias_size = 48 [default = 0];

@@ -438,7 +438,7 @@ message EvaluatorConfig {
   // Used by PrecisionRecallEvaluator and ClassificationErrorEvaluator
   // For multi binary labels: true if output > classification_threshold
-  optional real classification_threshold = 6 [default = 0.5];
+  optional double classification_threshold = 6 [default = 0.5];

   // The positive label. -1 means average precision and recall
   optional int32 positive_label = 7 [default = -1];
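Aside: a hedged sketch of how a consumer like NCELayer sees the field now that neg_sampling_dist is `repeated double`: RepeatedField<double>::data() yields a const double*, which is what the new MultinomialSampler::create accepts (RepeatedField is real protobuf API; this uses it directly instead of the generated ModelConfig class):

#include <google/protobuf/repeated_field.h>

#include <iostream>

int main() {
  google::protobuf::RepeatedField<double> neg_sampling_dist;
  neg_sampling_dist.Add(0.2);
  neg_sampling_dist.Add(0.8);
  const double* prob = neg_sampling_dist.data();  // const double*, not real*
  // This pointer would be handed to MultinomialSampler::create(prob, size).
  std::cout << prob[0] + prob[1] << "\n";
  return 0;
}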
proto/ParameterConfig.proto.m4 → proto/ParameterConfig.proto

@@ -11,7 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-ifdef(`proto3', `syntax = "proto2";')
+syntax = "proto2";

 package paddle;

@@ -32,14 +32,14 @@ message ParameterUpdaterHookConfig {
 message ParameterConfig {
   required string name = 1;
   required uint64 size = 2;
-  optional real learning_rate = 3 [default = 1.0];
-  optional real momentum = 4 [default = 0.0];
-  optional real initial_mean = 5 [default = 0.0];
-  optional real initial_std = 6 [default = 0.01];
+  optional double learning_rate = 3 [default = 1.0];
+  optional double momentum = 4 [default = 0.0];
+  optional double initial_mean = 5 [default = 0.0];
+  optional double initial_std = 6 [default = 0.01];
   // use L2-regularization if decay_rate set and decay_rate_l1 not set
-  optional real decay_rate = 7 [default = 0.0];
+  optional double decay_rate = 7 [default = 0.0];
   // use L1-regularization if decay_rate_l1 set
-  optional real decay_rate_l1 = 8 [default = 0.0];
+  optional double decay_rate_l1 = 8 [default = 0.0];
   // dims of Parameter, e.g. dims[0] as height, dims[1] as width..
   repeated uint64 dims = 9;
   // the gpu device which the parameter in.

@@ -60,7 +60,7 @@ message ParameterConfig {
   // sparse remote update or not
   optional bool sparse_remote_update = 16 [default = false];
   // gradient clipping threshold, no clipping by default
-  optional real gradient_clipping_threshold = 17 [default = 0.0];
+  optional double gradient_clipping_threshold = 17 [default = 0.0];
   // static parameters are fixed when training
   optional bool is_static = 18 [default = false];
   // para_id should NOT be set by config_parser. It is for
proto/ParameterService.proto.m4 → proto/ParameterService.proto

@@ -11,7 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-ifdef(`proto3', `syntax = "proto2";')
+syntax = "proto2";

 import "ParameterConfig.proto";
 import "TrainerConfig.proto";

@@ -73,7 +73,7 @@ message SendParameterRequest {
   optional int64 num_samples = 4;
   // cost will be used to calculate global objective value
-  optional real cost = 5;
+  optional double cost = 5;

   required BatchStatus batch_status = 6;

@@ -245,13 +245,13 @@ enum MatrixVectorOperation {
 message ProtoVector {
   required int64 dim = 1;
-  repeated real values = 2 [packed = true];
+  repeated double values = 2 [packed = true];
 }

 message ProtoMatrix {
   required int64 num_rows = 1;
   required int64 num_cols = 2;
-  repeated real values = 3 [packed = true];
+  repeated double values = 3 [packed = true];
 }

 message Operation {

@@ -263,7 +263,7 @@ message Operation {
   // matrix handles created on the pserver
   repeated int64 pmatrices = 3;  // A, B, C

-  repeated real scalars = 4;  // a, b, c
+  repeated double scalars = 4;  // a, b, c
   repeated ProtoVector vectors = 5;  // x, y, z
   repeated ProtoMatrix matrices = 6;  // X, Y, Z
 }

@@ -272,7 +272,7 @@ message OperationResult {
   // error message. Empty if success
   optional string return_message = 1;
   //
-  repeated real scalars = 2;  // d, e, f
+  repeated double scalars = 2;  // d, e, f
   repeated ProtoVector vectors = 3;  // p, q, r
   repeated ProtoMatrix matrices = 4;  // P, Q, R
 }
proto/TrainerConfig.proto.m4 → proto/TrainerConfig.proto

@@ -11,7 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-ifdef(`proto3', `syntax = "proto2";')
+syntax = "proto2";

 import "DataConfig.proto";
 import "ModelConfig.proto";

@@ -24,9 +24,9 @@ message OptimizationConfig {
   optional int32 num_batches_per_send_parameter = 5 [default = 1];
   optional int32 num_batches_per_get_parameter = 6 [default = 1];

-  required real learning_rate = 7;
-  optional real learning_rate_decay_a = 8 [default = 0];
-  optional real learning_rate_decay_b = 9 [default = 0];
+  required double learning_rate = 7;
+  optional double learning_rate_decay_a = 8 [default = 0];
+  optional double learning_rate_decay_b = 9 [default = 0];
   optional string learning_rate_schedule = 27 [default = "constant"];
   // learning rate will be scaled according to learning_rate_schedule
   // 1), constant:

@@ -49,14 +49,14 @@ message OptimizationConfig {
   // owlqn related
   // L1-regularization
-  optional real l1weight = 10 [default = 0.1];
+  optional double l1weight = 10 [default = 0.1];
   // L2-regularization
-  optional real l2weight = 11 [default = 0];
+  optional double l2weight = 11 [default = 0];
   // "c1" in wolfe condition: if (newobj <= oldobj + c1 * origDirDeriv * step)
   // then accept the step
-  optional real c1 = 12 [default = 0.0001];
+  optional double c1 = 12 [default = 0.0001];
   // multiply the step with "backoff", when wolfe condition doesn't satisfy
-  optional real backoff = 13 [default = 0.5];
+  optional double backoff = 13 [default = 0.5];
   // how many "s"s and "y"s are kept in owlqn
   optional int32 owlqn_steps = 14 [default = 10];
   // accept the step if encountered "max_backoff" times of "reduce the step"

@@ -82,15 +82,15 @@ message OptimizationConfig {
   // default learning method("momentum") use global decayed learning rate with momentum.
   // "adagrad", "adadelta" and "rmsprop" can set momentum too.
   optional string learning_method = 23 [default = "momentum"];
-  optional real ada_epsilon = 24 [default = 1e-6];
-  optional real ada_rou = 26 [default = 0.95];
+  optional double ada_epsilon = 24 [default = 1e-6];
+  optional double ada_rou = 26 [default = 0.95];

   // Force to do average in cpu in order to save gpu memory usage
   optional bool do_average_in_cpu = 25 [default = false];

   // delta add rate in pserver, used while num_batches_per_send_parameter>1
   // will be divided by #machines automatically.
-  optional real delta_add_rate = 28 [default = 1.0];
+  optional double delta_add_rate = 28 [default = 1.0];

   // We split a large size into smaller mini-batches, whose sizes are
   // determined by mini_batch_size. It only takes effect when there is

@@ -108,14 +108,14 @@ message OptimizationConfig {
   // shrink sparse parameter value
   // only works if parameter is remote sparse update and has L1 decay rate
-  optional real shrink_parameter_value = 32 [default = 0];
+  optional double shrink_parameter_value = 32 [default = 0];

   ////////////////////////////
   // Options Adam Optimizer //
   ////////////////////////////
-  optional real adam_beta1 = 33 [default = 0.9];
-  optional real adam_beta2 = 34 [default = 0.999];
-  optional real adam_epsilon = 35 [default = 1e-8];
+  optional double adam_beta1 = 33 [default = 0.9];
+  optional double adam_beta2 = 34 [default = 0.999];
+  optional double adam_epsilon = 35 [default = 1e-8];

   // arguments for learning rate scheduler
   // Format: num1:rate1,num2:rate2,...,numK:rateK

@@ -127,7 +127,7 @@ message OptimizationConfig {
   // for async sgd gradient commit control.
   // when async_lagged_grad_discard_ratio * num_gradient_servers commit passed,
   // current async gradient will be discard silently.
-  optional real async_lagged_grad_discard_ratio = 37 [default = 1.5];
+  optional double async_lagged_grad_discard_ratio = 37 [default = 1.5];
 };

 message TrainerConfig {