Commit 3eb42bfd authored by tensor-tang

move test_CompareMKLDNNandCPU to test_MKLDNN and remove unused code

Parent 56f6e231
@@ -521,12 +521,16 @@ void MKLDNNTester::getOutResult(const std::string& configPath,
     gradientMachine->forward(in.inArgs[i], &outArgs, PASS_TRAIN);
     // save forward result
     for (size_t k = 0; k < outArgs.size(); k++) {
-      MatrixPtr value = Matrix::create(outArgs[k].value->getHeight(),
-                                       outArgs[k].value->getWidth(),
-                                       false,
-                                       false);
-      value->copyFrom(*outArgs[k].value);
-      out.outValues.push_back(value);
+      const MatrixPtr& src = outArgs[k].value;
+      MatrixPtr dst =
+          Matrix::create(src->getHeight(), src->getWidth(), false, false);
+      if (typeid(*src) == typeid(MKLDNNMatrix)) {
+        MKLDNNMatrixPtr dnnSrc = std::dynamic_pointer_cast<MKLDNNMatrix>(src);
+        dnnSrc->copyTo(*dst);
+      } else {
+        dst->copyFrom(*src);
+      }
+      out.outValues.push_back(dst);
     }
     // random backward input
@@ -559,9 +563,9 @@ void MKLDNNTester::compareResult(DataOut& ref, DataOut& dnn, float eps) {
   }
 }
 
-void MKLDNNTester::runBranchesTest(const std::string& configPath,
-                                   size_t iter,
-                                   float eps) {
+void MKLDNNTester::runNetTest(const std::string& configPath,
+                              size_t iter,
+                              float eps) {
   DataIn in;
   initArgument(in, configPath, iter);
   DataOut outCpu, outDnn;
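Note: the copy path above picks its routine from the runtime matrix type, because an MKLDNNMatrix may keep its data in an internal MKL-DNN layout that a plain Matrix::copyFrom would read incorrectly. A minimal sketch of the same dispatch, using dynamic_pointer_cast for both the check and the cast (the helper name is illustrative, not part of this patch):

MatrixPtr cloneOutput(const MatrixPtr& src) {
  // Allocate a plain CPU matrix of the same shape (no transpose, no GPU).
  MatrixPtr dst =
      Matrix::create(src->getHeight(), src->getWidth(), false, false);
  if (auto dnnSrc = std::dynamic_pointer_cast<MKLDNNMatrix>(src)) {
    dnnSrc->copyTo(*dst);  // let the MKL-DNN side handle its own layout
  } else {
    dst->copyFrom(*src);   // plain CPU-to-CPU copy
  }
  return dst;
}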
@@ -85,17 +85,17 @@ public:
                           bool printDetails = false,
                           size_t iter = 3,
                           float epsilon = 1e-4);
-  static void runBranchesTest(const std::string& configPath,
-                              size_t iter = 3,
-                              float eps = 1e-4);
+  static void runNetTest(const std::string& configPath,
+                         size_t iter = 2,
+                         float eps = 1e-4);
   static void initArgument(DataIn& data,
                            const std::string& configPath,
-                           size_t iter = 3);
+                           size_t iter = 2);
   static void getOutResult(const std::string& configPath,
                            DataIn& in,
                            DataOut& out,
                            bool use_mkldnn,
-                           size_t iter = 3);
+                           size_t iter = 2);
 
 private:
   void reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize);
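Taken together, these declarations outline the renamed test flow: runNetTest runs one config twice, once on CPU and once with MKL-DNN enabled, then compares the forward results. A hedged sketch of that flow, assuming DataIn/DataOut are the tester's nested argument bundles and compareResult is reachable like the other helpers (the real body may differ):

void runNetTestSketch(const std::string& configPath,
                      size_t iter = 2,
                      float eps = 1e-4) {
  MKLDNNTester::DataIn in;
  MKLDNNTester::initArgument(in, configPath, iter);  // build random inputs
  MKLDNNTester::DataOut outCpu, outDnn;
  MKLDNNTester::getOutResult(configPath, in, outCpu, /*use_mkldnn=*/false, iter);
  MKLDNNTester::getOutResult(configPath, in, outDnn, /*use_mkldnn=*/true, iter);
  MKLDNNTester::compareResult(outCpu, outDnn, eps);  // element-wise, eps tolerance
}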
@@ -14,36 +14,82 @@
 from paddle.trainer_config_helpers import *
-################################### Data Configuration ###################################
-TrainData(ProtoData(files = "trainer/tests/mnist.list"))
-################################### Algorithm Configuration ###################################
-settings(batch_size = 128,
-         learning_method = MomentumOptimizer(momentum=0.5, sparse=False))
-################################### Network Configuration ###################################
-data = data_layer(name ="input", size=784)
+settings(batch_size=16)
+channels = get_config_arg("channels", int, 2)
+
+def two_conv(input, group_name):
+    out1 = img_conv_layer(input=input,
+                          name=group_name+'_conv1_',
+                          filter_size=1,
+                          num_filters=channels,
+                          padding=0,
+                          shared_biases=True,
+                          act=ReluActivation())
+    out2 = img_conv_layer(input=input,
+                          name=group_name+'_conv2_',
+                          filter_size=3,
+                          num_filters=channels,
+                          padding=1,
+                          shared_biases=True,
+                          act=ReluActivation())
+    return out1, out2
+
+def two_conv_bn(input, group_name):
+    out1, out2 = two_conv(input, group_name)
+    out1 = batch_norm_layer(input=out1,
+                            name=group_name+'_bn1_',
+                            use_global_stats=False,
+                            act=ReluActivation())
+    out2 = batch_norm_layer(input=out2,
+                            name=group_name+'_bn2_',
+                            use_global_stats=False,
+                            act=ReluActivation())
+    return out1, out2
+
+def two_conv_pool(input, group_name):
+    out1, out2 = two_conv(input, group_name)
+    out1 = img_pool_layer(input=out1,
+                          name=group_name+'_pool1_',
+                          pool_size=3,
+                          stride=2,
+                          padding=0,
+                          pool_type=MaxPooling())
+    out2 = img_pool_layer(input=out2,
+                          name=group_name+'_pool2_',
+                          pool_size=5,
+                          stride=2,
+                          padding=1,
+                          pool_type=MaxPooling())
+    return out1, out2
+
+def two_fc(input, group_name):
+    out1 = fc_layer(input=input,
+                    name=group_name+'_fc1_',
+                    size=channels,
+                    bias_attr=False,
+                    act=LinearActivation())
-tmp = img_conv_layer(input=data,
-                     num_channels=1,
-                     filter_size=3,
-                     num_filters=32,
-                     padding=1,
-                     shared_biases=True,
-                     act=ReluActivation())
+    out2 = fc_layer(input=input,
+                    name=group_name+'_fc2_',
+                    size=channels,
+                    bias_attr=False,
+                    act=LinearActivation())
+    return out1, out2
-a1 = img_conv_layer(input=tmp,
-                    filter_size=1,
-                    num_filters=32,
-                    padding=0,
-                    shared_biases=True,
-                    act=ReluActivation())
+
+data = data_layer(name ="input", size=channels*16*16)
-a2 = img_conv_layer(input=tmp,
+tmp = img_conv_layer(input=data,
+                     num_channels=channels,
                      filter_size=3,
-                     num_filters=32,
+                     num_filters=channels,
                      padding=1,
                      shared_biases=True,
                      act=ReluActivation())
+
+a1, a2 = two_conv(tmp, 'conv_branch')
 tmp = addto_layer(input=[a1, a2],
                   act=ReluActivation(),
                   bias_attr=False)
@@ -54,36 +100,11 @@ tmp = img_pool_layer(input=tmp,
                      padding=1,
                      pool_type=AvgPooling())
 
-b1 = img_conv_layer(input=tmp,
-                    filter_size=3,
-                    num_filters=32,
-                    padding=1,
-                    shared_biases=True,
-                    act=ReluActivation())
-b1 = img_pool_layer(input=b1,
-                    pool_size=3,
-                    stride=2,
-                    padding=0,
-                    pool_type=MaxPooling())
-b2 = img_conv_layer(input=tmp,
-                    filter_size=3,
-                    num_filters=64,
-                    padding=1,
-                    shared_biases=True,
-                    act=ReluActivation())
-b2 = img_pool_layer(input=b2,
-                    pool_size=5,
-                    stride=2,
-                    padding=1,
-                    pool_type=MaxPooling())
+b1, b2 = two_conv_pool(tmp, 'pool_branch')
 tmp = concat_layer(input=[b1, b2])
 tmp = img_pool_layer(input=tmp,
-                     num_channels=96,
+                     num_channels=channels*2,
                      pool_size=3,
                      stride=2,
                      padding=1,
@@ -91,8 +112,9 @@ tmp = img_pool_layer(input=tmp,
 tmp = img_conv_layer(input=tmp,
                      filter_size=3,
-                     num_filters=32,
+                     num_filters=channels,
                      padding=1,
+                     stride=2,
                      shared_biases=True,
                      act=LinearActivation(),
                      bias_attr=False)
@@ -101,33 +123,20 @@ tmp = batch_norm_layer(input=tmp,
                        use_global_stats=False,
                        act=ReluActivation())
 
-c1 = img_conv_layer(input=tmp,
-                    filter_size=1,
-                    num_filters=32,
-                    padding=0,
-                    shared_biases=True,
-                    act=ReluActivation())
-c2 = img_conv_layer(input=tmp,
-                    filter_size=3,
-                    num_filters=32,
-                    padding=1,
-                    shared_biases=True,
-                    act=ReluActivation())
+c1, c2 = two_conv_bn(tmp, 'bn_branch')
 tmp = addto_layer(input=[c1, c2],
                   act=ReluActivation(),
                   bias_attr=False)
 
-tmp = fc_layer(input=tmp, size=64,
-               bias_attr=False,
-               act=TanhActivation())
+tmp = fc_layer(input=tmp, size=channels,
+               bias_attr=True,
+               act=ReluActivation())
 
-output = fc_layer(input=tmp, size=10,
+d1, d2 = two_fc(tmp, 'fc_branch')
+tmp = addto_layer(input=[d1, d2])
+out = fc_layer(input=tmp, size=10,
                bias_attr=True,
                act=SoftmaxActivation())
-lbl = data_layer(name ="label", size=10)
-cost = classification_cost(input=output, label=lbl)
-outputs(cost)
+outputs(out)
-# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle.trainer_config_helpers import *
-
-settings(batch_size=16)
-channels = get_config_arg("channels", int, 2)
-
-def two_fc(input, group_name):
-    out1 = fc_layer(input=input,
-                    name=group_name+'_fc1',
-                    size=channels,
-                    bias_attr=False,
-                    act=LinearActivation())
-    out2 = fc_layer(input=input,
-                    name=group_name+'_fc2',
-                    size=channels,
-                    bias_attr=False,
-                    act=LinearActivation())
-    return out1, out2
-
-data = data_layer(name ="input", size=channels*16*16)
-conv = img_conv_layer(input=data,
-                      num_channels=channels,
-                      filter_size=3,
-                      num_filters=channels,
-                      padding=1,
-                      shared_biases=True,
-                      act=LinearActivation())
-pool = img_pool_layer(input=conv,
-                      pool_size=3,
-                      stride=2,
-                      padding=1,
-                      pool_type=AvgPooling())
-a1, a2 = two_fc(input=pool, group_name='a')
-concat = concat_layer(input=[a1, a2])
-b1, b2 = two_fc(input=pool, group_name='b')
-addto = addto_layer(input=[b1, b2])
-outputs([concat, addto])
-# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle.trainer_config_helpers import *
-
-settings(batch_size=16)
-channels = get_config_arg("channels", int, 2)
-
-def two_pool(input, group_name):
-    out1 = img_pool_layer(input=input,
-                          name=group_name+'_pool1',
-                          pool_size=3,
-                          stride=2,
-                          padding=0,
-                          pool_type=MaxPooling())
-    out2 = img_pool_layer(input=input,
-                          name=group_name+'_pool2',
-                          pool_size=5,
-                          stride=2,
-                          padding=1,
-                          pool_type=MaxPooling())
-    return out1, out2
-
-data = data_layer(name ="input", size=channels*16*16)
-conv = img_conv_layer(input=data,
-                      num_channels=channels,
-                      filter_size=3,
-                      num_filters=channels,
-                      padding=1,
-                      shared_biases=True,
-                      act=LinearActivation())
-pool = img_pool_layer(input=conv,
-                      pool_size=3,
-                      stride=1,
-                      padding=1,
-                      pool_type=AvgPooling())
-a1, a2 = two_pool(input=pool, group_name='a')
-concat = concat_layer(input=[a1, a2])
-b1, b2 = two_pool(input=pool, group_name='b')
-addto = addto_layer(input=[b1, b2])
-outputs([concat, addto])
@@ -17,40 +17,48 @@ from paddle.trainer_config_helpers import *
 settings(batch_size=16)
 channels = get_config_arg("channels", int, 2)
 
-def two_conv(input, group_name):
-    out1 = img_conv_layer(input=input,
-                          name=group_name+'_conv1',
-                          filter_size=1,
-                          num_filters=channels,
-                          padding=0,
-                          shared_biases=True,
-                          act=ReluActivation())
+data = data_layer(name ="input", size=channels*16*16)
-    out2 = img_conv_layer(input=input,
-                          name=group_name+'_conv2',
+
+tmp = img_conv_layer(input=data,
+                     num_channels=channels,
                      filter_size=3,
                      num_filters=channels,
                      padding=1,
                      shared_biases=True,
                      act=ReluActivation())
-    return out1, out2
-
-data = data_layer(name ="input", size=channels*16*16)
+
+tmp = img_pool_layer(input=tmp,
+                     pool_size=3,
+                     stride=1,
+                     padding=0,
+                     pool_type=AvgPooling())
-conv = img_conv_layer(input=data,
-                      num_channels=channels,
+
+tmp = img_conv_layer(input=tmp,
                      filter_size=3,
                      num_filters=channels,
                      padding=1,
                      shared_biases=True,
-                     act=ReluActivation())
+                     act=LinearActivation(),
+                     bias_attr=False)
-a1, a2 = two_conv(input=conv, group_name='a')
+
+tmp = batch_norm_layer(input=tmp,
+                       use_global_stats=False,
+                       act=ReluActivation())
-concat = concat_layer(input=[a1, a2])
+
+tmp = img_pool_layer(input=tmp,
+                     pool_size=3,
+                     stride=2,
+                     padding=1,
+                     pool_type=MaxPooling())
-b1, b2 = two_conv(input=conv, group_name='b')
+
+tmp = fc_layer(input=tmp,
+               size=channels,
+               bias_attr=False,
+               act=ReluActivation())
-addto = addto_layer(input=[b1, b2])
+
+out = fc_layer(input=tmp,
+               size=10,
+               bias_attr=True,
+               act=SoftmaxActivation())
-outputs([concat, addto])
+outputs(out)
@@ -308,15 +308,15 @@ TEST(MKLDNNActivation, Activations) {
 }
 
 DECLARE_string(config_args);
-TEST(MKLDNNLayer, branches) {
-  std::vector<std::string> cases = {"conv", "pool", "fc"};
+TEST(MKLDNNNet, net) {
+  std::vector<std::string> cases = {"simple", "branch"};
   for (auto name : cases) {
-    std::string config = "./gserver/tests/mkldnn_branches_" + name + ".conf";
+    std::string config = "./gserver/tests/mkldnn_" + name + "_net.conf";
     for (auto channels : {2, 32}) {
      std::ostringstream oss;
      oss << "channels=" << channels;
      FLAGS_config_args = oss.str();
-      MKLDNNTester::runBranchesTest(config);
+      MKLDNNTester::runNetTest(config);
    }
  }
 }
@@ -102,6 +102,11 @@ public:
     m_->copyFrom(src);
   }
 
+  void copyTo(Matrix& dst) {
+    // TODO(TJ): reorder data if this format is not nchw or x
+    dst.copyFrom(*m_);
+  }
+
 public:
   /**
    * Reorder this MKLDNNMatrix from other format.
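copyTo is the outbound counterpart of the existing copyFrom: it writes this matrix's data into a plain Matrix, and per the TODO it does not yet reorder layouts other than nchw or x. A hedged usage sketch (the helper name is illustrative):

// Copy an MKLDNN-held result back into plain CPU memory; formats other
// than nchw/x would need an explicit reorder first, as the TODO notes.
MatrixPtr toCpuMatrix(const MKLDNNMatrixPtr& dnnOut) {
  MatrixPtr cpuOut =
      Matrix::create(dnnOut->getHeight(), dnnOut->getWidth(), false, false);
  dnnOut->copyTo(*cpuOut);
  return cpuOut;
}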
@@ -37,22 +37,6 @@ add_test(NAME test_CompareTwoNets
        --config_file_a=trainer/tests/sample_trainer_config_qb_rnn.conf --config_file_b=trainer/tests/sample_trainer_config_rnn.conf
     WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
 
-################ test_CompareMKLDNNandCPU ######################
-if(WITH_MKLDNN)
-  macro(gen_command VAR_NAME CONFIG_FILE)
-    set(${VAR_NAME} "${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh" "-d" "${PADDLE_SOURCE_DIR}/python/"
-                    "${CMAKE_CURRENT_BINARY_DIR}/test_CompareMKLDNNandCPU --use_gpu=False"
-                    "--config_file_a=trainer/tests/${CONFIG_FILE} --use_mkldnn_a=True"
-                    "--config_file_b=trainer/tests/${CONFIG_FILE} --use_mkldnn_b=False"
-                    "WORKING_DIRECTORY" "${PADDLE_SOURCE_DIR}/paddle/")
-  endmacro()
-  add_unittest_without_exec(test_CompareMKLDNNandCPU test_CompareTwoNets.cpp)
-  gen_command(compare_simple_net "sample_trainer_config_simple_net.conf")
-  gen_command(compare_branch_net "sample_trainer_config_branch_net.conf")
-  add_test(NAME test_CompareMKLDNNandCPU_simple_net COMMAND ${compare_simple_net})
-  add_test(NAME test_CompareMKLDNNandCPU_branch_net COMMAND ${compare_branch_net})
-endif()
-
 ############### test_CompareTwoOpts ###################
 add_unittest_without_exec(test_CompareTwoOpts
     test_CompareTwoOpts.cpp)
-# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle.trainer_config_helpers import *
-
-################################### Data Configuration ###################################
-TrainData(ProtoData(files = "trainer/tests/mnist.list"))
-################################### Algorithm Configuration ###################################
-settings(batch_size = 128,
-         learning_method = MomentumOptimizer(momentum=0.5, sparse=False))
-################################### Network Configuration ###################################
-data = data_layer(name ="input", size=784)
-
-tmp = img_conv_layer(input=data,
-                     num_channels=1,
-                     filter_size=3,
-                     num_filters=32,
-                     padding=1,
-                     shared_biases=True,
-                     act=ReluActivation())
-
-tmp = img_pool_layer(input=tmp,
-                     pool_size=3,
-                     stride=2,
-                     padding=1,
-                     pool_type=AvgPooling())
-
-tmp = img_conv_layer(input=tmp,
-                     filter_size=3,
-                     num_filters=32,
-                     padding=1,
-                     shared_biases=True,
-                     act=LinearActivation(),
-                     bias_attr=False)
-
-tmp = batch_norm_layer(input=tmp,
-                       use_global_stats=False,
-                       act=ReluActivation())
-
-tmp = img_pool_layer(input=tmp,
-                     pool_size=3,
-                     stride=2,
-                     padding=1,
-                     pool_type=MaxPooling())
-
-tmp = fc_layer(input=tmp, size=64,
-               bias_attr=True,
-               act=ReluActivation())
-
-output = fc_layer(input=tmp, size=10,
-                  bias_attr=True,
-                  act=SoftmaxActivation())
-
-lbl = data_layer(name ="label", size=10)
-cost = classification_cost(input=output, label=lbl)
-outputs(cost)
@@ -26,15 +26,12 @@ DECLARE_int32(gpu_id);
 DECLARE_bool(local);
 DECLARE_bool(use_gpu);
-DECLARE_bool(use_mkldnn);
 DECLARE_string(config);
 DECLARE_string(nics);
 
 DEFINE_string(config_file_a, "", "config of one network to compare");
 DEFINE_string(config_file_b, "", "config of another network to compare");
-DEFINE_bool(use_mkldnn_a, false, "whether to use mkldnn to run config_file_a");
-DEFINE_bool(use_mkldnn_b, false, "whether to use mkldnn to run config_file_b");
 DEFINE_bool(need_high_accuracy,
             false,
             "whether need to run in double accuracy");
@@ -131,12 +128,6 @@ void compareGradient(ComData& comDataA, ComData& comDataB) {
                 matA.getWidth());
   }
 
-  if (FLAGS_use_mkldnn_a || FLAGS_use_mkldnn_b) {
-    // some format of mkldnn parameter is different with cpu
-    // test_MKLDNN will check the parameters
-    return;
-  }
-
   vector<ParameterPtr>& parametersA = comDataA.parameters;
   vector<ParameterPtr>& parametersB = comDataB.parameters;
@@ -176,12 +167,10 @@ void compareGradient(ComData& comDataA, ComData& comDataB) {
 TEST(Trainer, create) {
   ComData dataA;
-  FLAGS_use_mkldnn = FLAGS_use_mkldnn_a;
   calcGradient(dataA, FLAGS_config_file_a);
   LOG(INFO) << "\n\nforwardBackward of Network A is finished\n\n";
 
   ComData dataB;
-  FLAGS_use_mkldnn = FLAGS_use_mkldnn_b;
   calcGradient(dataB, FLAGS_config_file_b);
   LOG(INFO) << "\n\nforwardBackward of the Network B is finished\n\n";