Commit 634facec, authored by: Z zchen0211

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop

# Benchmark
Machine:
- Server
  - Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz, 2 sockets, 20 cores per socket
- Laptop
  - DELL XPS15-9560-R1745: i7-7700HQ, 8GB RAM, 256GB SSD
  - i5 MacBook Pro (Retina, 13-inch, Early 2015)
- Desktop
  - i7-6700k
System: CentOS release 6.3 (Final), Docker 1.12.1.
PaddlePaddle: paddlepaddle/paddle:latest (TODO: will rerun after 0.11.0)
- MKL-DNN tag v0.10
- MKLML 2018.0.20170720
- OpenBLAS v0.2.20
On each machine, we test and compare the single-node training performance of MKL-DNN, MKLML, and OpenBLAS respectively.
## Benchmark Model
### Server
Tested with batch sizes 64, 128, and 256 on Intel(R) Xeon(R) Gold 6148M CPU @ 2.40GHz.
Input image size: 3 * 224 * 224. Metric: images/second (higher is better).
- VGG-19
| BatchSize | 64 | 128 | 256 |
|--------------|-------| -----| --------|
| OpenBLAS | 7.82 | 8.62 | 10.34 |
| MKLML | 11.02 | 12.86 | 15.33 |
| MKL-DNN | 27.69 | 28.8 | 29.27 |
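
The table reports throughput in images/second, so time per batch is batch size divided by throughput. As a quick illustrative check of the MKL-DNN row (simple arithmetic only, not part of the benchmark scripts):

```python
# Convert reported throughput back to time per batch (illustrative only).
def seconds_per_batch(batch_size, images_per_second):
    return batch_size / images_per_second

# MKL-DNN row of the VGG-19 table above:
for batch_size, ips in [(64, 27.69), (128, 28.8), (256, 29.27)]:
    print("batch %3d: %.2f s/batch" % (batch_size, seconds_per_batch(batch_size, ips)))
# batch  64: 2.31 s/batch
# batch 128: 4.44 s/batch
# batch 256: 8.75 s/batch
```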
Chart for batch size 128:
TBD
- ResNet
- GoogLeNet
### Laptop
TBD
### Desktop
TBD
@@ -273,31 +273,37 @@ void MKLDNNTester::printVector(const VectorPtr& v) {
   VLOG(MKLDNN_ALL) << std::endl << ostr.str();
 }
 
-double MKLDNNTester::getDelta(const real* d1,
-                              const real* d2,
+double MKLDNNTester::getDelta(const real* refer,
+                              const real* value,
                               size_t len,
                               const float failRate,
                               const float thres) {
   double delta = 0, sum = 0;
   int failCnt = 0;
   const double eps = 1e-5;
-  double maxOut = 0;
+  double maxRatio = 0;
   for (size_t i = 0; i < len; ++i) {
-    double ref = fabs(d2[i]);
-    double diff = fabs(d1[i] - d2[i]);
+    double ref = fabs(refer[i]);
+    double val = fabs(value[i]);
+    double diff = fabs(refer[i] - value[i]);
     delta += diff;
     sum += ref;
-    if (ref > eps && fabs(d1[i]) > eps && diff / ref > thres) {
-      maxOut = std::max(maxOut, diff / ref);
+    if (ref < eps && val < eps) {  // both values are very small
+      continue;
+    }
+    double ratio = diff / ref;
+    if (ratio > thres) {
+      maxRatio = std::max(maxRatio, ratio);
       failCnt++;
     }
   }
-  EXPECT_TRUE(std::isnormal(sum));
   EXPECT_FALSE(std::isinf(sum));
+  EXPECT_FALSE(std::isnan(sum));
   EXPECT_FALSE(std::isnan(delta));
   VLOG(MKLDNN_ALL) << "reference avg data: " << sum / len
                    << ", delta: " << delta / sum << ", failCnt:" << failCnt;
-  return (failCnt / (float)len) > failRate ? maxOut : delta / sum;
+  double res = sum > eps ? delta / sum : eps;
+  return (failCnt / (float)len) > failRate ? maxRatio : res;
 }
 
 double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
@@ -515,12 +521,16 @@ void MKLDNNTester::getOutResult(const std::string& configPath,
     gradientMachine->forward(in.inArgs[i], &outArgs, PASS_TRAIN);
     // save forward result
     for (size_t k = 0; k < outArgs.size(); k++) {
-      MatrixPtr value = Matrix::create(outArgs[k].value->getHeight(),
-                                       outArgs[k].value->getWidth(),
-                                       false,
-                                       false);
-      value->copyFrom(*outArgs[k].value);
-      out.outValues.push_back(value);
+      const MatrixPtr& src = outArgs[k].value;
+      MatrixPtr dst =
+          Matrix::create(src->getHeight(), src->getWidth(), false, false);
+      if (typeid(*src) == typeid(MKLDNNMatrix)) {
+        MKLDNNMatrixPtr dnnSrc = std::dynamic_pointer_cast<MKLDNNMatrix>(src);
+        dnnSrc->copyTo(*dst);
+      } else {
+        dst->copyFrom(*src);
+      }
+      out.outValues.push_back(dst);
     }
     // random backward input
@@ -543,19 +553,19 @@ void MKLDNNTester::getOutResult(const std::string& configPath,
 void MKLDNNTester::compareResult(DataOut& ref, DataOut& dnn, float eps) {
   CHECK_EQ(ref.outValues.size(), dnn.outValues.size());
   CHECK_EQ(ref.paraValues.size(), dnn.paraValues.size());
-  VLOG(MKLDNN_TESTS) << "compare value size: " << ref.outValues.size();
   for (size_t i = 0; i < ref.outValues.size(); i++) {
+    VLOG(MKLDNN_TESTS) << "compare value index: " << i;
     EXPECT_LE(fabs(compareMatrix(ref.outValues[i], dnn.outValues[i])), eps);
   }
-  VLOG(MKLDNN_TESTS) << "compare param size: " << ref.outValues.size();
   for (size_t i = 0; i < ref.paraValues.size(); i++) {
+    VLOG(MKLDNN_TESTS) << "compare param index: " << i;
     EXPECT_LE(fabs(compareVector(ref.paraValues[i], dnn.paraValues[i])), eps);
   }
 }
 
-void MKLDNNTester::runBranchesTest(const std::string& configPath,
-                                   size_t iter,
-                                   float eps) {
+void MKLDNNTester::runNetTest(const std::string& configPath,
+                              size_t iter,
+                              float eps) {
   DataIn in;
   initArgument(in, configPath, iter);
   DataOut outCpu, outDnn;
...
@@ -85,17 +85,17 @@ public:
                       bool printDetails = false,
                       size_t iter = 3,
                       float epsilon = 1e-4);
-  static void runBranchesTest(const std::string& configPath,
-                              size_t iter = 3,
-                              float eps = 1e-4);
+  static void runNetTest(const std::string& configPath,
+                         size_t iter = 2,
+                         float eps = 1e-4);
   static void initArgument(DataIn& data,
                            const std::string& configPath,
-                           size_t iter = 3);
+                           size_t iter = 2);
   static void getOutResult(const std::string& configPath,
                            DataIn& in,
                            DataOut& out,
                            bool use_mkldnn,
-                           size_t iter = 3);
+                           size_t iter = 2);
 
 private:
   void reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize);
@@ -128,13 +128,13 @@ private:
   /**
    * Get delta percent
-   * if many(>failRate) wrong(abs(dnn-ref)/abs(ref)>thres) points return the
-   * max(diff/ref)
-   * else return sum(abs(a-b)) / sum(abs(b))
+   * if many(>failRate) wrong(abs(val-ref)/abs(ref) > thres) points
+   * return the max(diff/ref)
+   * else return sum(abs(diff)) / sum(abs(ref))
    * The return value should be smaller than eps when passing.
    */
-  static double getDelta(const real* d1,
-                         const real* d2,
+  static double getDelta(const real* refer,
+                         const real* value,
                          size_t len,
                          const float failRate = 1e-3,
                          const float thres = 0.1);
...
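
The pass/fail metric documented above for `getDelta` can be restated in a small sketch. The NumPy version below is illustrative only; it is not part of this patch, and it guards the division slightly differently than the C++ code:

```python
import numpy as np

# Illustrative sketch of the getDelta metric: if more than `fail_rate` of the
# points have a relative error above `thres`, report the worst relative error;
# otherwise report the overall relative delta sum(|val - ref|) / sum(|ref|).
def get_delta(refer, value, fail_rate=1e-3, thres=0.1, eps=1e-5):
    refer = np.asarray(refer, dtype=np.float64)
    value = np.asarray(value, dtype=np.float64)
    diff = np.abs(refer - value)
    ref = np.abs(refer)
    # Skip points where both values are very small, as the C++ code does.
    mask = ~((ref < eps) & (np.abs(value) < eps))
    ratio = np.where(mask, diff / np.maximum(ref, eps), 0.0)
    fail_cnt = np.count_nonzero(ratio > thres)
    total = ref.sum()
    res = diff.sum() / total if total > eps else eps
    return ratio.max() if fail_cnt / float(len(refer)) > fail_rate else res

# Example: one clear outlier among four points trips the fail-rate path.
print(get_delta([1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 8.0], fail_rate=0.1))  # 1.0
```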
@@ -14,36 +14,82 @@
 from paddle.trainer_config_helpers import *
 
-################################### Data Configuration ###################################
-TrainData(ProtoData(files = "trainer/tests/mnist.list"))
-################################### Algorithm Configuration ###################################
-settings(batch_size = 128,
-         learning_method = MomentumOptimizer(momentum=0.5, sparse=False))
-################################### Network Configuration ###################################
-data = data_layer(name ="input", size=784)
-
-tmp = img_conv_layer(input=data,
-                     num_channels=1,
-                     filter_size=3,
-                     num_filters=32,
-                     padding=1,
-                     shared_biases=True,
-                     act=ReluActivation())
-
-a1 = img_conv_layer(input=tmp,
-                    filter_size=1,
-                    num_filters=32,
-                    padding=0,
-                    shared_biases=True,
-                    act=ReluActivation())
-
-a2 = img_conv_layer(input=tmp,
-                    filter_size=3,
-                    num_filters=32,
-                    padding=1,
-                    shared_biases=True,
-                    act=ReluActivation())
+settings(batch_size=16)
+channels = get_config_arg("channels", int, 2)
+
+def two_conv(input, group_name):
+    out1 = img_conv_layer(input=input,
+                          name=group_name+'_conv1_',
+                          filter_size=1,
+                          num_filters=channels,
+                          padding=0,
+                          shared_biases=True,
+                          act=ReluActivation())
+    out2 = img_conv_layer(input=input,
+                          name=group_name+'_conv2_',
+                          filter_size=3,
+                          num_filters=channels,
+                          padding=1,
+                          shared_biases=True,
+                          act=ReluActivation())
+    return out1, out2
+
+def two_conv_bn(input, group_name):
+    out1, out2 = two_conv(input, group_name)
+    out1 = batch_norm_layer(input=out1,
+                            name=group_name+'_bn1_',
+                            use_global_stats=False,
+                            act=ReluActivation())
+    out2 = batch_norm_layer(input=out2,
+                            name=group_name+'_bn2_',
+                            use_global_stats=False,
+                            act=ReluActivation())
+    return out1, out2
+
+def two_conv_pool(input, group_name):
+    out1, out2 = two_conv(input, group_name)
+    out1 = img_pool_layer(input=out1,
+                          name=group_name+'_pool1_',
+                          pool_size=3,
+                          stride=2,
+                          padding=0,
+                          pool_type=MaxPooling())
+    out2 = img_pool_layer(input=out2,
+                          name=group_name+'_pool2_',
+                          pool_size=5,
+                          stride=2,
+                          padding=1,
+                          pool_type=MaxPooling())
+    return out1, out2
+
+def two_fc(input, group_name):
+    out1 = fc_layer(input=input,
+                    name=group_name+'_fc1_',
+                    size=channels,
+                    bias_attr=False,
+                    act=LinearActivation())
+    out2 = fc_layer(input=input,
+                    name=group_name+'_fc2_',
+                    size=channels,
+                    bias_attr=False,
+                    act=LinearActivation())
+    return out1, out2
+
+data = data_layer(name ="input", size=channels*16*16)
+
+tmp = img_conv_layer(input=data,
+                     num_channels=channels,
+                     filter_size=3,
+                     num_filters=channels,
+                     padding=1,
+                     shared_biases=True,
+                     act=ReluActivation())
 
+a1, a2 = two_conv(tmp, 'conv_branch')
 tmp = addto_layer(input=[a1, a2],
                   act=ReluActivation(),
                   bias_attr=False)
@@ -54,36 +100,11 @@ tmp = img_pool_layer(input=tmp,
                      padding=1,
                      pool_type=AvgPooling())
 
-b1 = img_conv_layer(input=tmp,
-                    filter_size=3,
-                    num_filters=32,
-                    padding=1,
-                    shared_biases=True,
-                    act=ReluActivation())
-
-b1 = img_pool_layer(input=b1,
-                    pool_size=3,
-                    stride=2,
-                    padding=0,
-                    pool_type=MaxPooling())
-
-b2 = img_conv_layer(input=tmp,
-                    filter_size=3,
-                    num_filters=64,
-                    padding=1,
-                    shared_biases=True,
-                    act=ReluActivation())
-
-b2 = img_pool_layer(input=b2,
-                    pool_size=5,
-                    stride=2,
-                    padding=1,
-                    pool_type=MaxPooling())
+b1, b2 = two_conv_pool(tmp, 'pool_branch')
 
 tmp = concat_layer(input=[b1, b2])
 
 tmp = img_pool_layer(input=tmp,
-                     num_channels=96,
+                     num_channels=channels*2,
                      pool_size=3,
                      stride=2,
                      padding=1,
@@ -91,8 +112,9 @@ tmp = img_pool_layer(input=tmp,
 
 tmp = img_conv_layer(input=tmp,
                      filter_size=3,
-                     num_filters=32,
+                     num_filters=channels,
                      padding=1,
+                     stride=2,
                      shared_biases=True,
                      act=LinearActivation(),
                      bias_attr=False)
@@ -101,33 +123,20 @@ tmp = batch_norm_layer(input=tmp,
                        use_global_stats=False,
                        act=ReluActivation())
 
-c1 = img_conv_layer(input=tmp,
-                    filter_size=1,
-                    num_filters=32,
-                    padding=0,
-                    shared_biases=True,
-                    act=ReluActivation())
-
-c2 = img_conv_layer(input=tmp,
-                    filter_size=3,
-                    num_filters=32,
-                    padding=1,
-                    shared_biases=True,
-                    act=ReluActivation())
+c1, c2 = two_conv_bn(tmp, 'bn_branch')
 
 tmp = addto_layer(input=[c1, c2],
                   act=ReluActivation(),
                   bias_attr=False)
 
-tmp = fc_layer(input=tmp, size=64,
-               bias_attr=False,
-               act=TanhActivation())
-
-output = fc_layer(input=tmp, size=10,
-                  bias_attr=True,
-                  act=SoftmaxActivation())
+tmp = fc_layer(input=tmp, size=channels,
+               bias_attr=True,
+               act=ReluActivation())
 
-lbl = data_layer(name ="label", size=10)
-cost = classification_cost(input=output, label=lbl)
-outputs(cost)
+d1, d2 = two_fc(tmp, 'fc_branch')
+tmp = addto_layer(input=[d1, d2])
+
+out = fc_layer(input=tmp, size=10,
+               bias_attr=True,
+               act=SoftmaxActivation())
+outputs(out)
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
settings(batch_size=16)
channels = get_config_arg("channels", int, 2)

def two_fc(input, group_name):
    out1 = fc_layer(input=input,
                    name=group_name+'_fc1',
                    size=channels,
                    bias_attr=False,
                    act=LinearActivation())
    out2 = fc_layer(input=input,
                    name=group_name+'_fc2',
                    size=channels,
                    bias_attr=False,
                    act=LinearActivation())
    return out1, out2

data = data_layer(name ="input", size=channels*16*16)

conv = img_conv_layer(input=data,
                      num_channels=channels,
                      filter_size=3,
                      num_filters=channels,
                      padding=1,
                      shared_biases=True,
                      act=LinearActivation())

pool = img_pool_layer(input=conv,
                      pool_size=3,
                      stride=2,
                      padding=1,
                      pool_type=AvgPooling())

a1, a2 = two_fc(input=pool, group_name='a')
concat = concat_layer(input=[a1, a2])

b1, b2 = two_fc(input=pool, group_name='b')
addto = addto_layer(input=[b1, b2])

outputs([concat, addto])
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
settings(batch_size=16)
channels = get_config_arg("channels", int, 2)

def two_pool(input, group_name):
    out1 = img_pool_layer(input=input,
                          name=group_name+'_pool1',
                          pool_size=3,
                          stride=2,
                          padding=0,
                          pool_type=MaxPooling())
    out2 = img_pool_layer(input=input,
                          name=group_name+'_pool2',
                          pool_size=5,
                          stride=2,
                          padding=1,
                          pool_type=MaxPooling())
    return out1, out2

data = data_layer(name ="input", size=channels*16*16)

conv = img_conv_layer(input=data,
                      num_channels=channels,
                      filter_size=3,
                      num_filters=channels,
                      padding=1,
                      shared_biases=True,
                      act=LinearActivation())

pool = img_pool_layer(input=conv,
                      pool_size=3,
                      stride=1,
                      padding=1,
                      pool_type=AvgPooling())

a1, a2 = two_pool(input=pool, group_name='a')
concat = concat_layer(input=[a1, a2])

b1, b2 = two_pool(input=pool, group_name='b')
addto = addto_layer(input=[b1, b2])

outputs([concat, addto])
@@ -17,40 +17,48 @@ from paddle.trainer_config_helpers import *
 settings(batch_size=16)
 channels = get_config_arg("channels", int, 2)
 
-def two_conv(input, group_name):
-    out1 = img_conv_layer(input=input,
-                          name=group_name+'_conv1',
-                          filter_size=1,
-                          num_filters=channels,
-                          padding=0,
-                          shared_biases=True,
-                          act=ReluActivation())
-    out2 = img_conv_layer(input=input,
-                          name=group_name+'_conv2',
-                          filter_size=3,
-                          num_filters=channels,
-                          padding=1,
-                          shared_biases=True,
-                          act=ReluActivation())
-    return out1, out2
-
-data = data_layer(name ="input", size=channels*16*16)
-
-conv = img_conv_layer(input=data,
-                      num_channels=channels,
-                      filter_size=3,
-                      num_filters=channels,
-                      padding=1,
-                      shared_biases=True,
-                      act=ReluActivation())
-
-a1, a2 = two_conv(input=conv, group_name='a')
-
-concat = concat_layer(input=[a1, a2])
-
-b1, b2 = two_conv(input=conv, group_name='b')
-
-addto = addto_layer(input=[b1, b2])
-
-outputs([concat, addto])
+data = data_layer(name ="input", size=channels*16*16)
+
+tmp = img_conv_layer(input=data,
+                     num_channels=channels,
+                     filter_size=3,
+                     num_filters=channels,
+                     padding=1,
+                     shared_biases=True,
+                     act=ReluActivation())
+
+tmp = img_pool_layer(input=tmp,
+                     pool_size=3,
+                     stride=1,
+                     padding=0,
+                     pool_type=AvgPooling())
+
+tmp = img_conv_layer(input=tmp,
+                     filter_size=3,
+                     num_filters=channels,
+                     padding=1,
+                     shared_biases=True,
+                     act=LinearActivation(),
+                     bias_attr=False)
+
+tmp = batch_norm_layer(input=tmp,
+                       use_global_stats=False,
+                       act=ReluActivation())
+
+tmp = img_pool_layer(input=tmp,
+                     pool_size=3,
+                     stride=2,
+                     padding=1,
+                     pool_type=MaxPooling())
+
+tmp = fc_layer(input=tmp,
+               size=channels,
+               bias_attr=False,
+               act=ReluActivation())
+
+out = fc_layer(input=tmp,
+               size=10,
+               bias_attr=True,
+               act=SoftmaxActivation())
+outputs(out)
@@ -234,8 +234,7 @@ static void getMKLDNNBatchNormConfig(TestConfig& cfg,
   cfg.inputDefs.push_back({INPUT_DATA, "layer_2_moving_var", 1, size_t(pm.ic)});
   cfg.inputDefs.back().isStatic = true;
   LayerInputConfig* input = cfg.layerConfig.add_inputs();
-  // TODO(TJ): uncomment me when refine and support comparing all zeroes vector
-  // cfg.layerConfig.set_active_type("relu");
+  cfg.layerConfig.set_active_type("relu");
   cfg.layerConfig.add_inputs();
   cfg.layerConfig.add_inputs();
   ImageConfig* img_conf = input->mutable_image_conf();
@@ -309,15 +308,15 @@ TEST(MKLDNNActivation, Activations) {
 }
 
 DECLARE_string(config_args);
-TEST(MKLDNNLayer, branches) {
-  std::vector<std::string> cases = {"conv", "pool", "fc"};
+TEST(MKLDNNNet, net) {
+  std::vector<std::string> cases = {"simple", "branch"};
   for (auto name : cases) {
-    std::string config = "./gserver/tests/mkldnn_branches_" + name + ".conf";
+    std::string config = "./gserver/tests/mkldnn_" + name + "_net.conf";
     for (auto channels : {2, 32}) {
       std::ostringstream oss;
      oss << "channels=" << channels;
      FLAGS_config_args = oss.str();
-      MKLDNNTester::runBranchesTest(config);
+      MKLDNNTester::runNetTest(config);
    }
  }
 }
...
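
The `channels=...` string built in the test above reaches the config files through `--config_args`, where `get_config_arg("channels", int, 2)` falls back to 2 when the key is absent. A toy stand-in for that lookup (hypothetical helper, not PaddlePaddle's actual implementation):

```python
# Illustrative only: a toy equivalent of resolving get_config_arg("channels", int, 2)
# from a "key=value,key=value" config_args string.
def resolve_config_arg(config_args, key, value_type, default):
    """Return value_type(value) for `key` in 'k1=v1,k2=v2', else `default`."""
    for pair in config_args.split(","):
        if "=" in pair:
            k, v = pair.split("=", 1)
            if k.strip() == key:
                return value_type(v)
    return default

# The unit test above passes "channels=2" and "channels=32"; with an empty
# string the configs would fall back to the default of 2.
print(resolve_config_arg("channels=32", "channels", int, 2))  # -> 32
print(resolve_config_arg("", "channels", int, 2))             # -> 2
```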
@@ -102,6 +102,11 @@ public:
     m_->copyFrom(src);
   }
 
+  void copyTo(Matrix& dst) {
+    // TODO(TJ): reorder data if this format is not nchw or x
+    dst.copyFrom(*m_);
+  }
+
 public:
   /**
    * Reorder this MKLDNNMatrix from other format.
...
@@ -37,22 +37,6 @@ add_test(NAME test_CompareTwoNets
        --config_file_a=trainer/tests/sample_trainer_config_qb_rnn.conf --config_file_b=trainer/tests/sample_trainer_config_rnn.conf
     WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/)
 
-################ test_CompareMKLDNNandCPU ######################
-if(WITH_MKLDNN)
-  macro(gen_command VAR_NAME CONFIG_FILE)
-    set(${VAR_NAME} "${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh" "-d" "${PADDLE_SOURCE_DIR}/python/"
-                    "${CMAKE_CURRENT_BINARY_DIR}/test_CompareMKLDNNandCPU --use_gpu=False"
-                    "--config_file_a=trainer/tests/${CONFIG_FILE} --use_mkldnn_a=True"
-                    "--config_file_b=trainer/tests/${CONFIG_FILE} --use_mkldnn_b=False"
-                    "WORKING_DIRECTORY" "${PADDLE_SOURCE_DIR}/paddle/")
-  endmacro()
-  add_unittest_without_exec(test_CompareMKLDNNandCPU test_CompareTwoNets.cpp)
-  gen_command(compare_simple_net "sample_trainer_config_simple_net.conf")
-  gen_command(compare_branch_net "sample_trainer_config_branch_net.conf")
-  add_test(NAME test_CompareMKLDNNandCPU_simple_net COMMAND ${compare_simple_net})
-  add_test(NAME test_CompareMKLDNNandCPU_branch_net COMMAND ${compare_branch_net})
-endif()
-
 ############### test_CompareTwoOpts ###################
 add_unittest_without_exec(test_CompareTwoOpts
     test_CompareTwoOpts.cpp)
...
# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
################################### Data Configuration ###################################
TrainData(ProtoData(files = "trainer/tests/mnist.list"))
################################### Algorithm Configuration ###################################
settings(batch_size = 128,
         learning_method = MomentumOptimizer(momentum=0.5, sparse=False))
################################### Network Configuration ###################################
data = data_layer(name ="input", size=784)

tmp = img_conv_layer(input=data,
                     num_channels=1,
                     filter_size=3,
                     num_filters=32,
                     padding=1,
                     shared_biases=True,
                     act=ReluActivation())

tmp = img_pool_layer(input=tmp,
                     pool_size=3,
                     stride=2,
                     padding=1,
                     pool_type=AvgPooling())

tmp = img_conv_layer(input=tmp,
                     filter_size=3,
                     num_filters=32,
                     padding=1,
                     shared_biases=True,
                     act=LinearActivation(),
                     bias_attr=False)

tmp = batch_norm_layer(input=tmp,
                       use_global_stats=False,
                       act=ReluActivation())

tmp = img_pool_layer(input=tmp,
                     pool_size=3,
                     stride=2,
                     padding=1,
                     pool_type=MaxPooling())

tmp = fc_layer(input=tmp, size=64,
               bias_attr=True,
               act=ReluActivation())

output = fc_layer(input=tmp, size=10,
                  bias_attr=True,
                  act=SoftmaxActivation())

lbl = data_layer(name ="label", size=10)

cost = classification_cost(input=output, label=lbl)
outputs(cost)
@@ -26,15 +26,12 @@ DECLARE_int32(gpu_id);
 DECLARE_bool(local);
 DECLARE_bool(use_gpu);
-DECLARE_bool(use_mkldnn);
 DECLARE_string(config);
 DECLARE_string(nics);
 
 DEFINE_string(config_file_a, "", "config of one network to compare");
 DEFINE_string(config_file_b, "", "config of another network to compare");
-DEFINE_bool(use_mkldnn_a, false, "whether to use mkldnn to run config_file_a");
-DEFINE_bool(use_mkldnn_b, false, "whether to use mkldnn to run config_file_b");
 DEFINE_bool(need_high_accuracy,
             false,
             "whether need to run in double accuracy");
@@ -131,12 +128,6 @@ void compareGradient(ComData& comDataA, ComData& comDataB) {
                   matA.getWidth());
   }
 
-  if (FLAGS_use_mkldnn_a || FLAGS_use_mkldnn_b) {
-    // some format of mkldnn parameter is different with cpu
-    // test_MKLDNN will check the parameters
-    return;
-  }
-
   vector<ParameterPtr>& parametersA = comDataA.parameters;
   vector<ParameterPtr>& parametersB = comDataB.parameters;
@@ -176,12 +167,10 @@ void compareGradient(ComData& comDataA, ComData& comDataB) {
 TEST(Trainer, create) {
   ComData dataA;
-  FLAGS_use_mkldnn = FLAGS_use_mkldnn_a;
   calcGradient(dataA, FLAGS_config_file_a);
   LOG(INFO) << "\n\nforwardBackward of Network A is finished\n\n";
 
   ComData dataB;
-  FLAGS_use_mkldnn = FLAGS_use_mkldnn_b;
   calcGradient(dataB, FLAGS_config_file_b);
   LOG(INFO) << "\n\nforwardBackward of the Network B is finished\n\n";
...