机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 4121ad3e
Authored Jul 01, 2018 by Xin Pan
Parent: 312f9170

fix test paths

Showing 7 changed files with 42 additions and 39 deletions (+42 / -39)
paddle/legacy/gserver/tests/test_CompareSparse.cpp               +1   -1
paddle/legacy/gserver/tests/test_CompareTwoNets.cpp              +3   -2
paddle/legacy/gserver/tests/test_MKLDNN.cpp                      +1   -1
paddle/legacy/gserver/tests/test_NetworkCompare.cpp              +14  -14
paddle/legacy/gserver/tests/test_PyDataProvider.cpp              +4   -2
paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp    +12  -12
paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp            +7   -7
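Every hunk below makes the same mechanical change: a hard-coded test resource path rooted at gserver/tests/ gains a legacy/ prefix, matching the move of these tests under paddle/legacy/. As a minimal sketch of that pattern (not code from this commit; the helper name legacyTestPath and the assumption that the tests keep their original working directory are mine), the edit is equivalent to routing each literal through a function like this:

// Hypothetical helper illustrating the path rewrite applied by hand in this
// commit: resources formerly found at "gserver/tests/..." are now resolved
// at "legacy/gserver/tests/..." relative to the test working directory.
#include <string>

static std::string legacyTestPath(const std::string& relPath) {
  // legacyTestPath("gserver/tests/sequence_lstm.conf")
  //   == "legacy/gserver/tests/sequence_lstm.conf"
  return "legacy/" + relPath;
}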

paddle/legacy/gserver/tests/test_CompareSparse.cpp
@@ -22,7 +22,7 @@ limitations under the License. */
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT

-static const string& configFile1 = "gserver/tests/sequence_lstm.conf";
+static const string& configFile1 = "legacy/gserver/tests/sequence_lstm.conf";

 DECLARE_bool(use_gpu);
 DECLARE_string(config);

paddle/legacy/gserver/tests/test_CompareTwoNets.cpp
@@ -40,9 +40,10 @@ DEFINE_double(
 DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_int32(seed);

-static const string& config_file_a = "gserver/tests/sequence_recurrent.py";
+static const string& config_file_a = "legacy/gserver/tests/sequence_recurrent.py";
-static const string& config_file_b = "gserver/tests/sequence_recurrent_group.py";
+static const string& config_file_b =
+    "legacy/gserver/tests/sequence_recurrent_group.py";

 struct ComData {
   vector<Argument> outArgs;

paddle/legacy/gserver/tests/test_MKLDNN.cpp
@@ -426,7 +426,7 @@ DECLARE_string(config_args);
 TEST(MKLDNNNet, net) {
   std::vector<std::string> cases = {"simple", "branch"};
   for (auto name : cases) {
-    std::string config = "./gserver/tests/mkldnn_" + name + "_net.conf";
+    std::string config = "./legacy/gserver/tests/mkldnn_" + name + "_net.conf";
     for (auto channels : {2, 32}) {
       std::ostringstream oss;
       oss << "channels=" << channels;

paddle/legacy/gserver/tests/test_NetworkCompare.cpp
@@ -220,33 +220,33 @@ void compareNetwork(const std::string& config_file_a,
 }

 TEST(Compare, concat_dotmul) {
-  std::string config_file_a = "./gserver/tests/concat_dotmul_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_dotmul_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_dotmul_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_dotmul_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }

 TEST(Compare, concat_fullmatrix) {
-  std::string config_file_a = "./gserver/tests/concat_fullmatrix_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_fullmatrix_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_fullmatrix_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_fullmatrix_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }

 TEST(Compare, concat_table) {
-  std::string config_file_a = "./gserver/tests/concat_table_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_table_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_table_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_table_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }

 TEST(Compare, concat_slice) {
-  std::string config_file_a = "./gserver/tests/concat_slice_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_slice_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_slice_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_slice_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }

 #ifdef PADDLE_WITH_CUDA
 TEST(Compare, img_pool) {
-  std::string config_file_a = "./gserver/tests/img_pool_a.conf";
-  std::string config_file_b = "./gserver/tests/img_pool_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/img_pool_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/img_pool_b.conf";
   bool useGpu = FLAGS_use_gpu;
   FLAGS_use_gpu = true;
   compareNetwork(config_file_a, config_file_b);

@@ -254,8 +254,8 @@ TEST(Compare, img_pool) {
 }

 TEST(Compare, img_conv) {
-  std::string config_file_a = "./gserver/tests/img_conv_a.conf";
-  std::string config_file_b = "./gserver/tests/img_conv_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/img_conv_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/img_conv_b.conf";
   bool useGpu = FLAGS_use_gpu;
   FLAGS_use_gpu = true;
   compareNetwork(config_file_a, config_file_b);

@@ -264,8 +264,8 @@ TEST(Compare, img_conv) {
 // Test cudnn_conv and exconv give the same result
 TEST(Compare, img_conv2) {
-  std::string config_file_a = "./gserver/tests/img_conv_cudnn.py";
-  std::string config_file_b = "./gserver/tests/img_conv_exconv.py";
+  std::string config_file_a = "./legacy/gserver/tests/img_conv_cudnn.py";
+  std::string config_file_b = "./legacy/gserver/tests/img_conv_exconv.py";
   bool useGpu = FLAGS_use_gpu;
   double eps = FLAGS_checkgrad_eps;
   FLAGS_use_gpu = true;

paddle/legacy/gserver/tests/test_PyDataProvider.cpp
@@ -35,7 +35,8 @@ TEST(PyDataProvider, py_fill_slots) {
   config.set_load_data_module(std::string("pyDataProvider"));
   config.set_load_data_object(std::string("SimpleDataProvider"));
   config.clear_files();
-  std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList";
+  std::string dataFile =
+      "legacy/gserver/tests/pyDataProvider/pyDataProviderList";
   config.set_files(dataFile);
 #ifndef PADDLE_WITH_CUDA
   bool useGpu = false;

@@ -68,7 +69,8 @@ TEST(PyDataProvider, py_fill_nest_slots) {
   config.set_load_data_module(std::string("pyDataProvider"));
   config.set_load_data_object(std::string("SimpleNestDataProvider"));
   config.clear_files();
-  std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList";
+  std::string dataFile =
+      "legacy/gserver/tests/pyDataProvider/pyDataProviderList";
   config.set_files(dataFile);
   EXPECT_EQ(config.IsInitialized(), true);
 #ifndef PADDLE_WITH_CUDA

paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp
@@ -102,11 +102,11 @@ void test(const string& conf1, const string& conf2, double eps, bool useGpu) {
   FLAGS_use_gpu = useGpu;
   int num_passes = 5;
   real* cost1 = new real[num_passes];
-  const string dir1 = "gserver/tests/t1";
+  const string dir1 = "legacy/gserver/tests/t1";
   CalCost(conf1, dir1, cost1, num_passes);

   real* cost2 = new real[num_passes];
-  const string dir2 = "gserver/tests/t2";
+  const string dir2 = "legacy/gserver/tests/t2";
   CalCost(conf2, dir2, cost2, num_passes);

   for (int i = 0; i < num_passes; i++) {

@@ -121,8 +121,8 @@ void test(const string& conf1, const string& conf2, double eps, bool useGpu) {
 TEST(RecurrentGradientMachine, HasSubSequence) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_layer_group.conf",
-         "gserver/tests/sequence_nest_layer_group.conf",
+    test("legacy/gserver/tests/sequence_layer_group.conf",
+         "legacy/gserver/tests/sequence_nest_layer_group.conf",
          1e-5,
          useGpu);
   }

@@ -130,8 +130,8 @@ TEST(RecurrentGradientMachine, HasSubSequence) {
 TEST(RecurrentGradientMachine, rnn) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn.conf",
-         "gserver/tests/sequence_nest_rnn.conf",
+    test("legacy/gserver/tests/sequence_rnn.conf",
+         "legacy/gserver/tests/sequence_nest_rnn.conf",
          1e-6,
          useGpu);
   }

@@ -139,8 +139,8 @@ TEST(RecurrentGradientMachine, rnn) {
 TEST(RecurrentGradientMachine, rnn_multi_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_input.conf",
-         "gserver/tests/sequence_nest_rnn_multi_input.conf",
+    test("legacy/gserver/tests/sequence_rnn_multi_input.conf",
+         "legacy/gserver/tests/sequence_nest_rnn_multi_input.conf",
          1e-6,
          useGpu);
   }

@@ -148,8 +148,8 @@ TEST(RecurrentGradientMachine, rnn_multi_input) {
 TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_unequalength_inputs.py",
-         "gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py",
+    test("legacy/gserver/tests/sequence_rnn_multi_unequalength_inputs.py",
+         "legacy/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py",
          1e-6,
          useGpu);
   }

@@ -157,8 +157,8 @@ TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) {
 TEST(RecurrentGradientMachine, rnn_mixed_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_mixed_inputs.py",
-         "gserver/tests/sequence_rnn_matched_inputs.py",
+    test("legacy/gserver/tests/sequence_rnn_mixed_inputs.py",
+         "legacy/gserver/tests/sequence_rnn_matched_inputs.py",
          1e-6,
          useGpu);
   }

paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp
@@ -76,7 +76,7 @@ void calcOutput(ComData& comData,
   FLAGS_config = configFile;
   FLAGS_config_args = configArgs;
   FLAGS_use_gpu = useGpu;
-  FLAGS_init_model_path = "gserver/tests/SelectiveFcTest/model";
+  FLAGS_init_model_path = "legacy/gserver/tests/SelectiveFcTest/model";
   *ThreadLocalRand::getSeed() = 0;
   srand(0);

@@ -311,13 +311,13 @@ LayerPtr initFcLayer(LayerPtr dataLayer,
 #ifndef PADDLE_TYPE_DOUBLE
 // The parameter file used in fc.conf and selective_fc.conf is float
 TEST(Layer, SelectiveFcLayer_train_dense_mul) {
-  const string& fcConfig = "gserver/tests/SelectiveFcTest/conf/fc.conf";
+  const string& fcConfig = "legacy/gserver/tests/SelectiveFcTest/conf/fc.conf";
   const string& fcConfigArgs =
-      "filelist=gserver/tests/SelectiveFcTest/dense_mul_list";
+      "filelist=legacy/gserver/tests/SelectiveFcTest/dense_mul_list";
   const string& selFcConfig =
-      "gserver/tests/SelectiveFcTest/conf/selective_fc.conf";
+      "legacy/gserver/tests/SelectiveFcTest/conf/selective_fc.conf";
   const string& selConfigArgs =
-      "filelist=gserver/tests/SelectiveFcTest/dense_mul_list";
+      "filelist=legacy/gserver/tests/SelectiveFcTest/dense_mul_list";
   for (auto useGpu : {false, true}) {
 #ifndef PADDLE_WITH_CUDA

@@ -350,7 +350,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config,
       creatDataLayer("data", batchSize, dataLayerSize, values, useGpu);
   const string& selfcParaFile =
-      "gserver/tests/SelectiveFcTest/model/rand_fc_param.w.transpose";
+      "legacy/gserver/tests/SelectiveFcTest/model/rand_fc_param.w.transpose";
   const string& selfcParaName = "rand_fc_param.w.transpose";
   std::shared_ptr<SelectiveFullyConnectedLayer> selfcLayer =

@@ -396,7 +396,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config,
   size_t nnz = cpuOutMatSelfc->getElementCnt();
   const string& fcParaFile =
-      "gserver/tests/SelectiveFcTest/model/rand_fc_param.w";
+      "legacy/gserver/tests/SelectiveFcTest/model/rand_fc_param.w";
   const string& fcParaName = "rand_fc_param.w";
   LayerConfig fcLayerConfig;
   fcLayerConfig.set_name("fc_layer");
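
Several of the tests above hand these relocated paths to gflags-backed globals (FLAGS_config, FLAGS_init_model_path) before running. The snippet below is a small, self-contained illustration of that flag pattern, not part of the commit; the DEFINE_string here stands in for the definition that lives elsewhere in Paddle, and the program simply echoes the value back.

// Standalone sketch of the gflags usage the tests rely on (assumed setup,
// not Paddle code): define a string flag, optionally override it in code,
// and read it back through its FLAGS_ global.
#include <gflags/gflags.h>
#include <iostream>
#include <string>

// In Paddle the definition lives in the framework; tests only DECLARE_string.
DEFINE_string(init_model_path, "", "directory holding the initial model");

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  // Point the flag at the new legacy/ location, as the updated tests do.
  FLAGS_init_model_path = "legacy/gserver/tests/SelectiveFcTest/model";
  std::cout << FLAGS_init_model_path << std::endl;
  return 0;
}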