// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/lite/api/cxx_api.h"
#include <gtest/gtest.h>
#include "paddle/fluid/lite/core/mir/passes.h"
#include "paddle/fluid/lite/core/op_executor.h"
#include "paddle/fluid/lite/core/op_registry.h"

namespace paddle {
namespace lite {

TEST(CXXApi, test) {
  lite::Predictor predictor;
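  // valid_places enumerates the target/precision/layout combinations the
  // optimizer may assign kernels to; the CUDA build also keeps host places
  // so ops without a CUDA kernel can fall back to the CPU.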
#ifdef LITE_WITH_CUDA
  std::vector<Place> valid_places({
      Place{TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNCHW)},
      Place{TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW)},
      Place{TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kNCHW)},
      Place{TARGET(kHost), PRECISION(kAny), DATALAYOUT(kNCHW)},
      Place{TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kAny)},
      Place{TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)},
  });
#else
  std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}});
#endif

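  // Build loads the model from the (local) path below and applies the
  // optimization passes; the Place argument is the preferred place used
  // when picking a kernel for each op.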
  predictor.Build("/home/chunwei/project/models/model2",
                  Place{TARGET(kCUDA), PRECISION(kFloat)}, valid_places);

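  // Feed the input: resize the first input tensor to 100x100 and fill it
  // with ascending float values.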
  auto* input_tensor = predictor.GetInput(0);
  input_tensor->Resize({100, 100});
  auto* data = input_tensor->mutable_data<float>();
  for (int i = 0; i < 100 * 100; i++) {
    data[i] = i;
  }

  LOG(INFO) << "input " << input_tensor;
  LOG(INFO) << "input " << *input_tensor;

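  // Execute the optimized program once.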
  predictor.Run();

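  // Fetch the first output tensor and log its contents.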
  auto* out = predictor.GetOutput(0);
  LOG(INFO) << out << " memory size " << out->memory_size();
  LOG(INFO) << "out " << out->data<float>()[0];
  LOG(INFO) << "out " << out->data<float>()[1];
  LOG(INFO) << "dims " << out->dims();
  LOG(INFO) << "out " << *out;
}

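// Loads and optimizes the same model, then persists the optimized model
// to disk with SaveModel.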
TEST(CXXApi, save_model) {
  lite::Predictor predictor;
  std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}});
  predictor.Build("/home/chunwei/project/models/model2",
                  Place{TARGET(kCUDA), PRECISION(kFloat)}, valid_places);

  predictor.SaveModel("./optimized_model");
}

}  // namespace lite
}  // namespace paddle

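// Reference the ops and kernels the model needs so that their static
// registrations are linked into the test binary.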
USE_LITE_OP(mul);
USE_LITE_OP(fc);
USE_LITE_OP(scale);
USE_LITE_OP(feed);
USE_LITE_OP(fetch);
USE_LITE_OP(io_copy);
USE_LITE_KERNEL(fc, kHost, kFloat, kNCHW, def);
USE_LITE_KERNEL(mul, kHost, kFloat, kNCHW, def);
USE_LITE_KERNEL(scale, kHost, kFloat, kNCHW, def);
USE_LITE_KERNEL(feed, kHost, kAny, kAny, def);
USE_LITE_KERNEL(fetch, kHost, kAny, kAny, def);

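// The CUDA kernels are only registered when building with LITE_WITH_CUDA.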
#ifdef LITE_WITH_CUDA
USE_LITE_KERNEL(mul, kCUDA, kFloat, kNCHW, def);
USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, host_to_device);
USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, device_to_host);
#endif