cxx_api_bin.cc
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/lite/api/cxx_api.h"

// #ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
#include "paddle/fluid/lite/core/mir/passes.h"
// #endif

#include "paddle/fluid/lite/core/op_registry.h"

namespace paddle {
namespace lite {

// Builds a predictor from the model in `model_dir`, feeds a dummy
// 1x3x224x224 float input, runs inference and logs the first output values.
void Run(const char* model_dir) {
#ifdef LITE_WITH_ARM
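  // Initialize ARM device information before building the predictor.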
  DeviceInfo::Init();
#endif
  lite::ExecutorLite predictor;
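  // Candidate places (target/precision) the optimizer may choose kernels from.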
  std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
                                   Place{TARGET(kARM), PRECISION(kFloat)}});

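  // Load the model and optimize it for the preferred ARM place.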
  predictor.Build(model_dir, Place{TARGET(kARM), PRECISION(kFloat)},
                  valid_places);

  // Feed a dummy 1x3x224x224 float input whose values increase from 0.
  auto* input_tensor = predictor.GetInput(0);
  input_tensor->Resize(DDim(std::vector<DDim::value_type>({1, 3, 224, 224})));
  auto* data = input_tensor->mutable_data<float>();
  for (int i = 0; i < input_tensor->dims().production(); i++) {
    data[i] = i;
  }

  predictor.Run();

  auto* out = predictor.GetOutput(0);
  LOG(INFO) << out << " memory size " << out->data_size();
  LOG(INFO) << "out " << out->data<float>()[0];
  LOG(INFO) << "out " << out->data<float>()[1];
  LOG(INFO) << "dims " << out->dims();
  LOG(INFO) << "out data size: " << out->data_size();
}

}  // namespace lite
}  // namespace paddle

int main(int argc, char** argv) {
  CHECK_EQ(argc, 2) << "usage: ./cmd <model_dir>";
  paddle::lite::Run(argv[1]);

  return 0;
}

// Operators that must be linked into this binary.
USE_LITE_OP(mul);
USE_LITE_OP(fc);
USE_LITE_OP(scale);
USE_LITE_OP(feed);
USE_LITE_OP(fetch);
USE_LITE_OP(io_copy);

USE_LITE_OP(conv2d);
// USE_LITE_OP(batch_norm);
USE_LITE_OP(relu);
USE_LITE_OP(depthwise_conv2d);
USE_LITE_OP(pool2d);
USE_LITE_OP(elementwise_add);
USE_LITE_OP(softmax);

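// Host-side kernels for the feed/fetch operators.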
USE_LITE_KERNEL(feed, kHost, kAny, kAny, def);
USE_LITE_KERNEL(fetch, kHost, kAny, kAny, def);

#ifdef LITE_WITH_ARM
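// Float NCHW kernels for the ARM backend.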
USE_LITE_KERNEL(fc, kARM, kFloat, kNCHW, def);
USE_LITE_KERNEL(mul, kARM, kFloat, kNCHW, def);
USE_LITE_KERNEL(scale, kARM, kFloat, kNCHW, def);

USE_LITE_KERNEL(conv2d, kARM, kFloat, kNCHW, def);
USE_LITE_KERNEL(batch_norm, kARM, kFloat, kNCHW, def);
USE_LITE_KERNEL(relu, kARM, kFloat, kNCHW, def);
USE_LITE_KERNEL(depthwise_conv2d, kARM, kFloat, kNCHW, def);
USE_LITE_KERNEL(pool2d, kARM, kFloat, kNCHW, def);
USE_LITE_KERNEL(elementwise_add, kARM, kFloat, kNCHW, def);
USE_LITE_KERNEL(softmax, kARM, kFloat, kNCHW, def);

// USE_LITE_KERNEL(feed, kARM, kAny, kAny, def);
// USE_LITE_KERNEL(fetch, kARM, kAny, kAny, def);
#endif  // LITE_WITH_ARM

#ifdef LITE_WITH_CUDA
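// CUDA kernels, including the host<->device io_copy kernels.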
USE_LITE_KERNEL(mul, kCUDA, kFloat, kNCHW, def);
USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, host_to_device);
USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, device_to_host);
#endif