diff --git a/paddle/fluid/lite/api/cxx_api_bin.cc b/paddle/fluid/lite/api/cxx_api_bin.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d19a95b34a9adf758cc238ae56103c5782689fa4
--- /dev/null
+++ b/paddle/fluid/lite/api/cxx_api_bin.cc
@@ -0,0 +1,72 @@
+#include "paddle/fluid/lite/api/cxx_api.h"
+#include "paddle/fluid/lite/core/mir/passes.h"
+#include "paddle/fluid/lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+
+void Run(const char* model_dir) {
+  lite::LightPredictor predictor;
+#ifndef LITE_WITH_CUDA
+  std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)}});
+#else
+  std::vector<Place> valid_places({
+      Place{TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNCHW)},
+      Place{TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW)},
+      Place{TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kNCHW)},
+      Place{TARGET(kHost), PRECISION(kAny), DATALAYOUT(kNCHW)},
+      Place{TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kAny)},
+      Place{TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)},
+  });
+#endif
+
+  predictor.Build(model_dir, Place{TARGET(kCUDA), PRECISION(kFloat)},
+                  valid_places);
+
+  auto* input_tensor = predictor.GetInput(0);
+  input_tensor->Resize(DDim(std::vector<int64_t>({100, 100})));
+  auto* data = input_tensor->mutable_data<float>();
+  for (int i = 0; i < 100 * 100; i++) {
+    data[i] = i;
+  }
+
+  LOG(INFO) << "input " << *input_tensor;
+
+  predictor.Run();
+
+  auto* out = predictor.GetOutput(0);
+  LOG(INFO) << out << " memory size " << out->data_size();
+  LOG(INFO) << "out " << out->data<float>()[0];
+  LOG(INFO) << "out " << out->data<float>()[1];
+  LOG(INFO) << "dims " << out->dims();
+  LOG(INFO) << "out " << *out;
+}
+
+}  // namespace lite
+}  // namespace paddle
+
+int main(int argc, char** argv) {
+  CHECK_EQ(argc, 2) << "usage: ./cmd <model_dir>";
+  paddle::lite::Run(argv[1]);
+
+  return 0;
+}
+
+
+USE_LITE_OP(mul);
+USE_LITE_OP(fc);
+USE_LITE_OP(scale);
+USE_LITE_OP(feed);
+USE_LITE_OP(fetch);
+USE_LITE_OP(io_copy);
+USE_LITE_KERNEL(fc, kHost, kFloat, kNCHW, def);
+USE_LITE_KERNEL(mul, kHost, kFloat, kNCHW, def);
+USE_LITE_KERNEL(scale, kHost, kFloat, kNCHW, def);
+USE_LITE_KERNEL(feed, kHost, kAny, kAny, def);
+USE_LITE_KERNEL(fetch, kHost, kAny, kAny, def);
+
+#ifdef LITE_WITH_CUDA
+USE_LITE_KERNEL(mul, kCUDA, kFloat, kNCHW, def);
+USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, host_to_device);
+USE_LITE_KERNEL(io_copy, kCUDA, kAny, kAny, device_to_host);
+#endif
diff --git a/paddle/fluid/lite/core/framework.proto b/paddle/fluid/lite/core/framework.proto
new file mode 120000
index 0000000000000000000000000000000000000000..d98f7d057fe065712598f0f27a94990d12c7f85a
--- /dev/null
+++ b/paddle/fluid/lite/core/framework.proto
@@ -0,0 +1 @@
+../../../fluid/framework/framework.proto
\ No newline at end of file
diff --git a/paddle/fluid/lite/utils/cp_logging.cc b/paddle/fluid/lite/utils/cp_logging.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d72cf0d782fd8b17516242440995cb9fe5eb389b
--- /dev/null
+++ b/paddle/fluid/lite/utils/cp_logging.cc
@@ -0,0 +1,5 @@
+#include "paddle/fluid/lite/utils/cp_logging.h"
+
+namespace paddle {
+namespace lite {}  // namespace lite
+}  // namespace paddle
diff --git a/paddle/fluid/lite/utils/cp_logging.h b/paddle/fluid/lite/utils/cp_logging.h
new file mode 100644
index 0000000000000000000000000000000000000000..885670cc28bfe287850752bddb7f0c8e98e55d56
--- /dev/null
+++ b/paddle/fluid/lite/utils/cp_logging.h
@@ -0,0 +1,5 @@
+#ifdef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
+#include "paddle/fluid/lite/utils/logging.h"
+#else  // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
+#include <glog/logging.h>
+#endif  // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK