/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <fstream>

#include <paddle/utils/PythonUtil.h>
#include <paddle/trainer/Trainer.h>

#include <gtest/gtest.h>

using namespace paddle;  // NOLINT
using namespace std;     // NOLINT

static const string& CONFIG_FILE = "trainer/tests/sample_trainer_rnn_gen.conf";
static const string& NEST_CONFIG_FILE =
    "trainer/tests/sample_trainer_nest_rnn_gen.conf";
static const string& OUTPUT_DIR = "trainer/tests/dump_text.test";
static string modelDir = "trainer/tests/rnn_gen_test_model_dir/t1";  // NOLINT
static string expectFile =                                           // NOLINT
    "trainer/tests/rnn_gen_test_model_dir/r1.test";                  // NOLINT

P_DECLARE_string(config_args);

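// Read whitespace-separated floats from a result file into a vector.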
vector<float> readRetFile(const string& fname) {
  ifstream inFile(fname);
  float ret;
  vector<float> nums;
  while (inFile >> ret) {
    nums.push_back(ret);
  }
  return nums;
}

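// Compare the dumped generation results with the expected results file,
// element by element.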
void checkOutput(const string& expRetFile) {
  vector<float> rets = readRetFile(OUTPUT_DIR);
  vector<float> expRets = readRetFile(expRetFile);
  EXPECT_EQ(rets.size(), expRets.size());
  for (size_t i = 0; i < rets.size(); i++) {
    EXPECT_FLOAT_EQ(rets[i], expRets[i]);
  }
}

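// Prepare the two inputs for generation: the sentence-id argument and a dummy
// argument whose only role is to fix the batch size and, in the hierarchical
// case, the sequence/subsequence layout.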
void prepareInArgs(vector<Argument>& inArgs, const size_t batchSize,
                   bool useGpu, bool hasSubseq) {
  inArgs.clear();
  // sentence id
  Argument sentId;
  sentId.value = nullptr;
  if (hasSubseq) {
    // as there is only one sequence, there is only one label.
    IVector::resizeOrCreate(sentId.ids, 1, useGpu);
    sentId.ids->setElement(0, 0);
  } else {
    // since there are batchSize words, there are batchSize labels.
    IVector::resizeOrCreate(sentId.ids, batchSize, useGpu);
    for (size_t i = 0; i < batchSize; ++i) sentId.ids->setElement(i, i);
  }
  inArgs.emplace_back(sentId);

  // a dummy layer to decide batch size
  Argument dummyInput;
  dummyInput.value = Matrix::create(batchSize, 2, false, useGpu);
  dummyInput.value->randomizeUniform();
  if (hasSubseq) {
    // generate one sequence with batchSize subsequences,
    // each of which contains only one word.
    dummyInput.sequenceStartPositions = ICpuGpuVector::create(2, false);
    int* buf = dummyInput.sequenceStartPositions->getMutableData(false);
    dummyInput.subSequenceStartPositions =
        ICpuGpuVector::create(batchSize + 1, false);
    int* subBuf = dummyInput.subSequenceStartPositions->getMutableData(false);
    buf[0] = 0;
    buf[1] = batchSize;
    for (size_t i = 0; i < batchSize + 1; i++) subBuf[i] = i;
  }
  inArgs.emplace_back(dummyInput);
}

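// Load the pre-trained model from modelDir, run one generation forward pass
// with the given config, and check the dumped output against expRetFile.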
void testGeneration(const string& configFile, bool useGpu, bool hasSubseq,
                    const string& expRetFile) {
  FLAGS_use_gpu = useGpu;
  auto config = std::make_shared<TrainerConfigHelper>(configFile);
  unique_ptr<GradientMachine> gradientMachine(GradientMachine::create(*config));
  gradientMachine->loadParameters(modelDir);
  vector<Argument> inArgs(2);

  const size_t batchSize = 15;
  prepareInArgs(inArgs, batchSize, useGpu, hasSubseq);
  vector<Argument> outArgs;
  unique_ptr<Evaluator> testEvaluator(gradientMachine->makeEvaluator());
  testEvaluator->start();
  gradientMachine->forward(inArgs, &outArgs, PASS_TEST);
  gradientMachine->eval(testEvaluator.get());
  testEvaluator->finish();
  checkOutput(expRetFile);
}

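// Results are compared with EXPECT_FLOAT_EQ against recorded single-precision
// files, so the test is not compiled for double-precision builds.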
#ifndef PADDLE_TYPE_DOUBLE

TEST(RecurrentGradientMachine, test_generation) {
#ifdef PADDLE_ONLY_CPU
  const auto useGpuConfs = {false};
#else
  const auto useGpuConfs = {true, false};
#endif
  auto testGen = [&](const string& configFile, bool hasSubseq,
                     const string& expRetFile, bool beam_search) {
    FLAGS_config_args = beam_search ? "beam_search=1" : "beam_search=0";
    for (auto useGpu : useGpuConfs) {
      testGeneration(configFile, useGpu, hasSubseq, expRetFile);
    }
  };
  testGen(CONFIG_FILE, false, expectFile + ".nobeam", false);  // no beam search
  testGen(CONFIG_FILE, false, expectFile + ".beam", true);     // beam search
  // In the hierarchical RNN, beam search and one-way (greedy) search only take
  // place in the inner RNN; the outer RNN concatenates the results generated
  // by the inner RNN (the top candidate in the beam-search case), so both
  // search modes produce the same outer results.
  testGen(NEST_CONFIG_FILE, true, expectFile + ".nest",
          false);  // no beam search
  testGen(NEST_CONFIG_FILE, true, expectFile + ".nest", true);  // beam search
}
#endif

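// Optionally accepts two command-line arguments that override the default
// model directory and expected-result file prefix.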
int main(int argc, char** argv) {
  initMain(argc, argv);
  initPython(argc, argv);
  CHECK(argc == 1 || argc == 3);
  if (argc == 3) {
    modelDir = argv[1];
    expectFile = argv[2];
  }
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}