/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "MKLDNNTester.h"
#include "paddle/legacy/gserver/layers/MKLDNNBase.h"
#include "paddle/legacy/gserver/layers/MKLDNNLayer.h"
#include "paddle/trainer/Trainer.h"

namespace paddle {

// init data layer and test layer of both dnn and reference
void MKLDNNTester::reset(const TestConfig& dnn,
                         const TestConfig& ref,
                         size_t batchSize) {
  const bool trans = false;
  const bool useGpu = false;

  // clear
  configs_.clear();
  layerNames_.clear();
  dataLayers_.clear();
  datas_.clear();
  layerMaps_.clear();
  parameters_.clear();
  testLayers_.clear();

  // resize
  configs_.resize(NUM);
  layerNames_.resize(NUM);
  dataLayers_.resize(NUM);
  datas_.resize(NUM);
  layerMaps_.resize(NUM);
  parameters_.resize(NUM);
  testLayers_.resize(NUM);

  // reset configs and layer names
  configs_[DNN] = dnn;
  configs_[REF] = ref;
  layerNames_[DNN] = "mkldnn";     // the first is mkldnn layer
  layerNames_[REF] = "reference";  // second is reference layer

  // reset others
  for (size_t i = 0; i < NUM; ++i) {
    configs_[i].layerConfig.set_name(layerNames_[i]);
    initDataLayer(configs_[i],
                  &(dataLayers_[i]),
                  &(datas_[i]),
                  &(layerMaps_[i]),
                  layerNames_[i],
                  batchSize,
                  trans,
                  useGpu);
    initTestLayer(
        configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i]));
  }
  refLayer_ = testLayers_[REF];
  dnnLayer_ = testLayers_[DNN];
  EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size());
  EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
  setInputImgSize();

  // for comparison with Paddle reference results,
  // we need to manually add a cpu device output for the test
  MKLDNNLayerPtr dnnLayer = std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
  if (dnnLayer) {
    dnnLayer->addOutputArgument(CPU_DEVICE);
  }
}

void MKLDNNTester::setInputImgSize() {
  for (size_t n = 0; n < dataLayers_.size(); ++n) {
    for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
      // TODO(TJ): fix me when concat and elewise ready
      dataLayers_[n][i]->getOutput().setFrameHeight(ih_);
      dataLayers_[n][i]->getOutput().setFrameWidth(iw_);
    }
  }
}

// init random parameters of ref, and copy them to mkldnn
void MKLDNNTester::randomWgtDatas() {
  EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
  const bool isBN = refLayer_->getType() == "batch_norm";
  for (size_t i = 0; i < parameters_[REF].size(); ++i) {
    const VectorPtr& dnnValue = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
    const VectorPtr& refValue = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
    parameters_[REF][i]->randomize();
    if (isBN && i == 2) {
      // this param is the moving average in batch norm, which must be larger than 0
      real offset = fabs(refValue->getMin()) + 1.0;
      refValue->add(offset);
    }
    dnnValue->copyFrom(*refValue);

    VLOG(MKLDNN_TESTS) << "Random weight " << parameters_[DNN][i]->getName();
    printVector(dnnValue);
  }
}

// randomize bottom data of ref layer and copy the same to mkldnn
void MKLDNNTester::randomBotDatas() {
  CHECK_EQ(dataLayers_.size(), NUM);
  for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
    dataLayers_[REF][i]->getOutputValue()->randomizeUniform();
    dataLayers_[DNN][i]->getOutputValue()->copyFrom(
        *(dataLayers_[REF][i]->getOutputValue()));
    VLOG(MKLDNN_TESTS) << "Random Forward, InputValue " << i;
    printMatrix(dataLayers_[REF][i]->getOutputValue());
  }
}

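// randomize the top diffs (output grads) of ref layer and copy them to mkldnn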
void MKLDNNTester::randomTopDiffs() {
  refLayer_->getOutputGrad()->randomizeUniform();
  dnnLayer_->getOutput(CPU_DEVICE)
      .grad->copyFrom(*(refLayer_->getOutputGrad()));
  VLOG(MKLDNN_TESTS) << "Random Backward, OutputGrad";
  printMatrix(refLayer_->getOutputGrad());
}

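// compare the forward output values of the dnn and ref layers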
void MKLDNNTester::checkForward() {
  VLOG(MKLDNN_TESTS) << "Check Forward";
  printTopDatas();
  double delta =
      compareMatrix(refLayer_->getOutputValue(), dnnLayer_->getOutputValue());
  EXPECT_LE(fabs(delta), eps_);
}

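// compare the bottom diffs (input grads) of the dnn and ref layers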
void MKLDNNTester::checkBackwardData() {
  VLOG(MKLDNN_TESTS) << "Check Backward Data";
  const bool isBN = refLayer_->getType() == "batch_norm";
  for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
    const MatrixPtr& dnnDiff = dataLayers_[DNN][i]->getOutputGrad();
    const MatrixPtr& refDiff = dataLayers_[REF][i]->getOutputGrad();
    VLOG(MKLDNN_ALL) << "MKLDNN Backward Result: InputGrad " << i;
    printMatrix(dnnDiff);
    VLOG(MKLDNN_ALL) << "Reference Backward Result: InputGrad " << i;
    printMatrix(refDiff);

    double delta = compareMatrix(refDiff, dnnDiff);
    EXPECT_LE(fabs(delta), eps_);
    if (isBN) {
      // the other two inputs in batch norm are the moving mean and var,
      // which do not have grads to compare
      break;
    }
  }
}

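// compare the weight values of the dnn and ref layers after backward;
// the mkldnn weights are converted to paddle format for the comparison
// and restored afterwards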
void MKLDNNTester::checkBackwardWgts() {
  VLOG(MKLDNN_TESTS) << "Check Backward Weight";
  CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
  vector<VectorPtr> dnnWgts;  // used to temporarily save mkldnn weights
  saveWgt(parameters_[DNN], dnnWgts);

  MKLDNNLayerPtr dnnLayer = std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
  if (dnnLayer) {
    dnnLayer->convertWeightsToPaddle();
  }
  for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
    const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
    const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
    VLOG(MKLDNN_ALL) << "MKLDNN Result: weight value "
                     << parameters_[DNN][i]->getName();
    printVector(dnn);
    VLOG(MKLDNN_ALL) << "Reference Result: weight value "
                     << parameters_[REF][i]->getName();
    printVector(ref);

    double delta = compareVector(ref, dnn);
    EXPECT_LE(fabs(delta), eps_);
  }

  VLOG(MKLDNN_ALL) << "Restore dnn weights saved before compare";
  restoreWgt(dnnWgts, parameters_[DNN]);
}

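// save the parameter values into temporary cpu vectors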
void MKLDNNTester::saveWgt(const vector<ParameterPtr>& from,
                           vector<VectorPtr>& to) {
  const bool useGpu = false;
  to.resize(from.size());
  for (size_t i = 0; i < to.size(); ++i) {
    const VectorPtr& wgt = from[i]->getBuf(PARAMETER_VALUE);
    to[i] = Vector::create(wgt->getSize(), useGpu);
    to[i]->copyFrom(*wgt);
  }
}

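// restore the parameter values from previously saved vectors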
void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
                              vector<ParameterPtr>& to) {
  CHECK_EQ(from.size(), to.size());
  for (size_t i = 0; i < from.size(); ++i) {
    const VectorPtr& wgt = to[i]->getBuf(PARAMETER_VALUE);
    wgt->copyFrom(*from[i]);
  }
}

// clear parameter gradients of network id; id == parameters_.size() clears all
void MKLDNNTester::clearWgtDiffs(size_t id) {
  CHECK_LE(id, parameters_.size());
  for (size_t n = 0; n < parameters_.size(); ++n) {
    if (id == n || id == parameters_.size()) {
      for (size_t i = 0; i < parameters_[n].size(); ++i) {
        const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
        if (grad) {
          grad->zeroMem();
        }
      }
    }
  }
}

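// clear the bottom diffs (output grads of the data layers) of network id;
// id == dataLayers_.size() clears both networks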
void MKLDNNTester::clearBotDiffs(size_t id) {
  CHECK_LE(id, dataLayers_.size());
  for (size_t n = 0; n < dataLayers_.size(); ++n) {
    if (id == n || id == dataLayers_.size()) {
      // clear the input layers of this specific layer
      for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
        dataLayers_[n][i]->getOutputGrad()->zeroMem();
      }
    }
  }
}

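// clear the top data (output values) of the test layers of network id;
// id == testLayers_.size() clears both networks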
void MKLDNNTester::clearTopDatas(size_t id) {
  CHECK_LE(id, testLayers_.size());
  for (size_t i = 0; i < testLayers_.size(); ++i) {
    if (id == i || id == testLayers_.size()) {
      testLayers_[i]->getOutputValue()->zeroMem();
    }
  }
}

void MKLDNNTester::printTopDatas() {
  if (!log_) {
    return;
  }

  for (int n = 0; n < NUM; ++n) {
    VLOG(MKLDNN_ALL) << testLayers_[n]->getType()
                     << " Forward Result: OutputValue";
    printMatrix(testLayers_[n]->getOutputValue());
  }
}

void MKLDNNTester::printMatrix(const MatrixPtr& m) {
  if (!log_) {
    return;
  }

  std::ostringstream ostr;
  m->print(ostr);
  VLOG(MKLDNN_ALL) << std::endl << ostr.str();
}

void MKLDNNTester::printVector(const VectorPtr& v) {
  if (!log_) {
    return;
  }

  std::ostringstream ostr;
  v->print(ostr, v->getSize());
  VLOG(MKLDNN_ALL) << std::endl << ostr.str();
}

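// compute the relative difference between value and refer over len elements:
// normally returns sum(|refer[i] - value[i]|) / sum(|refer[i]|); but if more
// than failRate of the elements have a relative diff above thres, the max
// such ratio is returned instead, so the caller's EXPECT_LE check fails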
double MKLDNNTester::getDelta(const real* refer,
                              const real* value,
                              size_t len,
                              const float failRate,
                              const float thres) {
  double delta = 0, sum = 0;
  int failCnt = 0;
  const double eps = 1e-5;
  double maxRatio = 0;
  for (size_t i = 0; i < len; ++i) {
    double ref = fabs(refer[i]);
    double val = fabs(value[i]);
    double diff = fabs(refer[i] - value[i]);
    delta += diff;
    sum += ref;
    if (ref < eps && val < eps) {  // both values are very small
      continue;
    }
    double ratio = diff / ref;
    if (ratio > thres) {
      maxRatio = std::max(maxRatio, ratio);
      failCnt++;
    }
  }
  EXPECT_FALSE(std::isinf(sum));
  EXPECT_FALSE(std::isnan(sum));
  EXPECT_FALSE(std::isnan(delta));
  VLOG(MKLDNN_ALL) << "reference avg data: " << sum / len
                   << ", delta: " << delta / sum << ", failCnt:" << failCnt;
  double res = sum > eps ? delta / sum : eps;
  return (failCnt / (float)len) > failRate ? maxRatio : res;
}

double MKLDNNTester::compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2) {
  CHECK_EQ(m1->getElementCnt(), m2->getElementCnt());
  return getDelta(m1->getData(), m2->getData(), m1->getElementCnt());
}

double MKLDNNTester::compareVector(const VectorPtr& v1, const VectorPtr& v2) {
  CHECK_EQ(v1->getSize(), v2->getSize());
  return getDelta(v1->getData(), v2->getData(), v1->getSize());
}

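// run one forward pass (and, when training, one backward pass with a simple
// sgd update) on both layers and compare outputs, input grads and weights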
void MKLDNNTester::runOnce() {
  // test forward
  randomBotDatas();
  dnnLayer_->forward(passType_);
  refLayer_->forward(passType_);
  checkForward();

  if (passType_ == PASS_TEST) {
    return;
  }

  // test backward
  // simple updater
  UpdateCallback updateCallback = [](Parameter* para) {
    auto& grad = para->getBuf(PARAMETER_GRADIENT);
    auto& value = para->getBuf(PARAMETER_VALUE);
    real lr = 1e-2;
    value->add(*grad, lr);
    grad->zeroMem();
  };
  randomTopDiffs();
  dnnLayer_->backward(updateCallback);
  refLayer_->backward(updateCallback);
  checkBackwardData();
  checkBackwardWgts();

  // clear buffers
  // ref code will add to the diff, dnn code will write to it
  // and clearTopDatas(REF) should be covered by ref layers
  clearBotDiffs(REF);
  clearWgtDiffs(REF);
  // it is necessary to clear bottom diffs when only activation is dnn type
  if (configs_[DNN].layerConfig.active_type().compare(0, 7, "mkldnn_") == 0) {
    clearBotDiffs(DNN);
  }
}

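// run the full single-layer test: first with the original weight format,
// then restarted with the mkldnn weight format when the two formats differ.
// A hypothetical call (names here are illustrative, not from this file):
//   MKLDNNTester tester;
//   tester.run(dnnConfig, refConfig, /*batchSize=*/128, /*inputImgH=*/14,
//              /*inputImgW=*/14, PASS_TRAIN, /*printDetails=*/false,
//              /*iter=*/3, /*epsilon=*/1e-4);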
void MKLDNNTester::run(const TestConfig& dnn,
                       const TestConfig& ref,
                       size_t batchSize,
                       size_t inputImgH,
                       size_t inputImgW,
                       PassType passType,
                       bool printDetails,
                       size_t iter,
                       float epsilon) {
  CHECK(dnn.layerConfig.type().compare(0, 7, "mkldnn_") == 0 ||
        dnn.layerConfig.active_type().compare(0, 7, "mkldnn_") == 0)
      << "should be MKLDNN layer or MKLDNN activation";
  if (dnn.layerConfig.type() == ref.layerConfig.type()) {
    VLOG(MKLDNN_TESTS) << "Test MKLDNN functionality: "
                       << dnn.layerConfig.active_type() << " vs "
                       << ref.layerConfig.active_type();
  } else {
    VLOG(MKLDNN_TESTS) << "Test MKLDNN functionality: "
                       << dnn.layerConfig.type() << " vs "
                       << ref.layerConfig.type();
  }

  ih_ = inputImgH;
  iw_ = inputImgW;
  passType_ = passType;
  log_ = printDetails;
  iter_ = iter;
  eps_ = epsilon;

  // Firstly test mkldnn init from PARAM_FORMAT_ORIGINAL weight
  reset(dnn, ref, batchSize);
  randomWgtDatas();
  clearWgtDiffs();
  clearBotDiffs();
  for (size_t i = 0; i < iter_; ++i) {
    VLOG(MKLDNN_TESTS) << "Check Iteration " << i;
    runOnce();
  }

  if (parameters_[DNN].empty()) {
    // has no parameters
    return;
  }

  // After running some iterations, the mkldnn weight has been stored in dnnLayer
  // and we can also get the mkldnn weight parameter header format.
  // Weight parameter should always be index 0 (and bias index 1).
  // TODO(TJ): should also consider mean and var format when batchnorm ready
  int dnnWgtFmt = parameters_[DNN][0]->getHeaderFormat();
  int refWgtFmt = parameters_[REF][0]->getHeaderFormat();
  if (dnnWgtFmt == refWgtFmt) {
    // weight formats are equal, so no need to check more
    return;
  }

  // then save the weights and restart again
  vector<VectorPtr> dnnWgts, refWgts;
  CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
  saveWgt(parameters_[DNN], dnnWgts);
  saveWgt(parameters_[REF], refWgts);

  // restart again with dnn weight format
  reset(dnn, ref, batchSize);
  // TODO(TJ): should also consider mean and var format when batchnorm ready
  parameters_[DNN][0]->setHeaderFormat(dnnWgtFmt);

  // restore wgt
  restoreWgt(dnnWgts, parameters_[DNN]);
  restoreWgt(refWgts, parameters_[REF]);
  clearWgtDiffs();
  clearBotDiffs();

  for (size_t i = 0; i < iter_; ++i) {
    VLOG(MKLDNN_TESTS) << "Check Iteration " << i;
    runOnce();
  }
}

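// prepare random inputs for iter iterations of the network in configPath:
// input values, output grads and initial parameter values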
void MKLDNNTester::initArgument(DataIn& data,
                                const std::string& configPath,
                                const size_t iter) {
  TrainerConfigHelper config(configPath);
  size_t batchSize = config.getOptConfig().batch_size();
  data.inArgs.resize(iter);
  data.outGrads.resize(iter);
  data.paraValues.clear();
  for (const auto& layer_name : config.getModelConfig().input_layer_names()) {
    auto layer_config = std::find_if(config.getModelConfig().layers().begin(),
                                     config.getModelConfig().layers().end(),
                                     [=](const LayerConfig& layer_config) {
                                       return layer_config.name() == layer_name;
                                     });
    CHECK(layer_config != config.getModelConfig().layers().end());

    size_t layerSize = layer_config->size();
    for (size_t i = 0; i < iter; ++i) {
      Argument arg;
      arg.value = Matrix::create(batchSize, layerSize, false, false);
      arg.grad = Matrix::create(batchSize, layerSize, false, false);
      arg.value->randomizeUniform();
      arg.value->add(-0.5);
      arg.value->sigmoid(*arg.value);
      arg.grad->zeroMem();
      arg.ids = VectorT<int>::create(batchSize, false);
      arg.ids->rand(layerSize);
      generateSequenceStartPositions(batchSize, arg.sequenceStartPositions);
      data.inArgs[i].push_back(arg);
    }
  }

  for (const auto& layer_name : config.getModelConfig().output_layer_names()) {
    auto layer_config = std::find_if(config.getModelConfig().layers().begin(),
                                     config.getModelConfig().layers().end(),
                                     [=](const LayerConfig& layer_config) {
                                       return layer_config.name() == layer_name;
                                     });
    CHECK(layer_config != config.getModelConfig().layers().end());

    size_t layerSize = layer_config->size();
    for (size_t i = 0; i < iter; ++i) {
      MatrixPtr grad = Matrix::create(batchSize, layerSize, false, false);
      grad->randomizeUniform();
      data.outGrads[i].push_back(grad);
    }
  }

  for (const auto& para_config : config.getModelConfig().parameters()) {
    VectorPtr value = Vector::create(para_config.size(), false);
    value->randnorm(0, 2);
    data.paraValues.push_back(value);
  }
}

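// run the network for iter iterations, with or without mkldnn, and collect
// the forward output values and the final parameter values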
void MKLDNNTester::getOutResult(const std::string& configPath,
                                DataIn& in,
                                DataOut& out,
                                bool use_mkldnn,
                                size_t iter) {
  FLAGS_use_gpu = false;
  FLAGS_use_mkldnn = use_mkldnn;
  *ThreadLocalRand::getSeed() = 1;
  srand(1);

  Trainer trainer;
  auto config = std::make_shared<TrainerConfigHelper>(configPath);
  trainer.init(config, false);
  auto gradientMachine = trainer.getGradientMachine();
  std::vector<ParameterPtr> parameters = gradientMachine->getParameters();
  for (size_t i = 0; i < in.paraValues.size(); i++) {
    parameters[i]->getBuf(PARAMETER_VALUE)->copyFrom(*in.paraValues[i]);
  }
  UpdateCallback simpleUpdate = [](Parameter* para) {
    auto& grad = para->getBuf(PARAMETER_GRADIENT);
    auto& value = para->getBuf(PARAMETER_VALUE);
    real lr = 1e-2;
    value->add(*grad, lr);
    grad->zeroMem();
  };

  vector<Argument> outArgs;
  gradientMachine->start();
  out.outValues.clear();
  out.paraValues.clear();
  for (size_t i = 0; i < iter; ++i) {
    VLOG(MKLDNN_TESTS) << "running iteration " << i;
    gradientMachine->forward(in.inArgs[i], &outArgs, PASS_TRAIN);
    // save forward result
    for (size_t k = 0; k < outArgs.size(); k++) {
      const MatrixPtr& src = outArgs[k].value;
      MatrixPtr dst =
          Matrix::create(src->getHeight(), src->getWidth(), false, false);
      if (typeid(*src) == typeid(MKLDNNMatrix)) {
        MKLDNNMatrixPtr dnnSrc = std::dynamic_pointer_cast<MKLDNNMatrix>(src);
        dnnSrc->copyTo(*dst);
      } else {
        dst->copyFrom(*src);
      }
      out.outValues.push_back(dst);
    }

    // random backward input
    for (size_t k = 0; k < outArgs.size(); k++) {
      outArgs[k].grad->copyFrom(*in.outGrads[i][k]);
    }
    gradientMachine->backward(simpleUpdate);
  }
  gradientMachine->finish();

  // save param value
  for (size_t i = 0; i < in.paraValues.size(); i++) {
    VectorPtr val = Vector::create(
        parameters[i]->getBuf(PARAMETER_VALUE)->getSize(), false);
    val->copyFrom(*parameters[i]->getBuf(PARAMETER_VALUE));
    out.paraValues.push_back(val);
  }
}

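// compare the collected output values and parameter values of the two runs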
void MKLDNNTester::compareResult(DataOut& ref, DataOut& dnn, float eps) {
  CHECK_EQ(ref.outValues.size(), dnn.outValues.size());
  CHECK_EQ(ref.paraValues.size(), dnn.paraValues.size());
  for (size_t i = 0; i < ref.outValues.size(); i++) {
    VLOG(MKLDNN_TESTS) << "compare value index: " << i;
    EXPECT_LE(fabs(compareMatrix(ref.outValues[i], dnn.outValues[i])), eps);
  }
  for (size_t i = 0; i < ref.paraValues.size(); i++) {
    VLOG(MKLDNN_TESTS) << "compare param index: " << i;
    EXPECT_LE(fabs(compareVector(ref.paraValues[i], dnn.paraValues[i])), eps);
  }
}

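// whole-network test: run the same config on the cpu-only and mkldnn paths
// and compare their outputs and parameters. A hypothetical call (the config
// path is illustrative only):
//   MKLDNNTester tester;
//   tester.runNetTest("sample_net.conf", /*iter=*/2, /*eps=*/1e-4);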
void MKLDNNTester::runNetTest(const std::string& configPath,
                              size_t iter,
                              float eps) {
  DataIn in;
  initArgument(in, configPath, iter);
  DataOut outCpu, outDnn;
  VLOG(MKLDNN_TESTS) << "running cpu network";
  getOutResult(configPath, in, outCpu, false, iter);
  VLOG(MKLDNN_TESTS) << "running mkldnn network";
  getOutResult(configPath, in, outDnn, true, iter);

  compareResult(outCpu, outDnn, eps);
}

}  // namespace paddle