diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5df83499d5dde29b205ee17fba81a63c9a643235..00996cb7ed5cc573c42b69be6db369c3654d6d1a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -20,8 +20,10 @@ set(PADDLE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
 include(system)
 
 project(paddle CXX C Go)
-message(STATUS "CXX compiler: " ${CMAKE_CXX_COMPILER} ", version: " ${CMAKE_CXX_COMPILER_VERSION})
-message(STATUS "C compiler: " ${CMAKE_C_COMPILER} ", version: " ${CMAKE_C_COMPILER_VERSION})
+message(STATUS "CXX compiler: ${CMAKE_CXX_COMPILER}, version: "
+               "${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}")
+message(STATUS "C compiler: ${CMAKE_C_COMPILER}, version: "
+               "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}")
 
 find_package(Sphinx)
 if(NOT CMAKE_CROSSCOMPILING)
diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake
index 96fc886a342cae38d5b804266d3af7bc909a4da2..c4712f19eb80b34ffbf713d2b13fc0c775312af1 100644
--- a/cmake/external/eigen.cmake
+++ b/cmake/external/eigen.cmake
@@ -19,7 +19,7 @@ ExternalProject_Add(
 
 if (${CMAKE_VERSION} VERSION_LESS "3.3.0")
     set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/eigen3_dummy.c)
-    file(WRITE ${dummyfile} "const char * dummy_eigen3 = \"${dummyfile}\";")
+    file(WRITE ${dummyfile} "const char *dummy_eigen3 = \"${dummyfile}\";")
     add_library(eigen3 STATIC ${dummyfile})
 else()
     add_library(eigen3 INTERFACE)
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index da25574793f3cea9f8eb5ed515b0aeef02c5c348..0e79c0cc7992060cbe3b668ec927936183389eb6 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -109,7 +109,7 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR})
 # FIXME(gangliao): generate cblas target to track all high performance
 # linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas)
 SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c)
-FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";")
+FILE(WRITE ${dummyfile} "const char *dummy_cblas = \"${dummyfile}\";")
 ADD_LIBRARY(cblas STATIC ${dummyfile})
 TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES})
diff --git a/cmake/generic.cmake b/cmake/generic.cmake
index 66c8e3ad7ef7c80c1f388c25983425a0db5c0220..585db019d521b1699baadfae31ef95b5059c71b4 100644
--- a/cmake/generic.cmake
+++ b/cmake/generic.cmake
@@ -120,7 +120,7 @@ function(merge_static_libs TARGET_NAME)
       DEPENDS ${libs})
 
   # Generate dummy static lib
-  file(WRITE ${target_SRCS} "const char *dummy = \"${target_SRCS}\";")
+  file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
   add_library(${TARGET_NAME} STATIC ${target_SRCS})
   target_link_libraries(${TARGET_NAME} ${libs_deps})
 
@@ -160,7 +160,7 @@ function(merge_static_libs TARGET_NAME)
       DEPENDS ${libs} ${target_OBJS})
 
   # Generate dummy static lib
-  file(WRITE ${target_SRCS} "const char *dummy = \"${target_SRCS}\";")
+  file(WRITE ${target_SRCS} "const char *dummy_${TARGET_NAME} = \"${target_SRCS}\";")
   add_library(${TARGET_NAME} STATIC ${target_SRCS})
   target_link_libraries(${TARGET_NAME} ${libs_deps})
 
@@ -324,7 +324,7 @@ function(go_library TARGET_NAME)
     )
 
   # Add dummy code to support `make target_name` under Terminal Command
-  file(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";")
+  file(WRITE ${dummyfile} "const char *dummy_${TARGET_NAME} = \"${dummyfile}\";")
   if (go_library_SHARED OR go_library_shared)
     add_library(${TARGET_NAME} SHARED ${dummyfile})
   else()
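Note: the `dummy_${TARGET_NAME}` renames above are not cosmetic. Every `file(WRITE ...)` call generates a tiny placeholder translation unit, and all of them used to define the same global symbol. A minimal C sketch of the collision this avoids (archive names taken from the hunks above, linker behavior paraphrased):

    /* Before the rename, each generated dummy source defined the same symbol:
     *
     *   const char * dummy = "cblas_dummy.c";   // in libcblas.a
     *   const char * dummy = "eigen3_dummy.c";  // in libeigen3.a
     *
     * so a binary linked against both archives could fail with a
     * duplicate-symbol error. With the rename, each archive exports a
     * unique, harmless global instead:
     */
    const char *dummy_cblas = "cblas_dummy.c";
    const char *dummy_eigen3 = "eigen3_dummy.c";
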
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index 7d2becbdd772747d77890321fce6721d8d17fb30..4a98ede278fad85ff2beef3c8e7dd158912f693a 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -24,6 +24,7 @@ else()
   add_subdirectory(framework)
   add_subdirectory(operators)
   add_subdirectory(pybind)
+  add_subdirectory(inference)
 endif()
 
 if(WITH_SWIG_PY)
diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc
index 3e58e6442edfe006c8aed238f67b9524783601ee..e02e572af2c4ee122d033bc7b5231be94026c180 100644
--- a/paddle/framework/op_desc.cc
+++ b/paddle/framework/op_desc.cc
@@ -64,7 +64,7 @@ class CompileTimeInferShapeContext : public InferShapeContext {
     PADDLE_ENFORCE_EQ(in_var->GetType(), proto::VarDesc::LOD_TENSOR,
                       "The %d-th output of Output(%s) must be LoDTensor.", j, out);
-    out_var->SetLoDLevel(in_var->GetLodLevel());
+    out_var->SetLoDLevel(in_var->GetLoDLevel());
   }
 
   bool IsRuntime() const override;
diff --git a/paddle/framework/var_desc.cc b/paddle/framework/var_desc.cc
index 7d002b9ea0b597730685ee03b021c4982f787f49..aeab18d7214f8d9dd79bc3d2e0322490445b3b49 100644
--- a/paddle/framework/var_desc.cc
+++ b/paddle/framework/var_desc.cc
@@ -52,7 +52,7 @@ void VarDesc::SetLoDLevel(int32_t lod_level) {
   }
 }
 
-int32_t VarDesc::GetLodLevel() const {
+int32_t VarDesc::GetLoDLevel() const {
   switch (desc_.type()) {
     case proto::VarDesc::LOD_TENSOR:
       return desc_.lod_tensor().lod_level();
diff --git a/paddle/framework/var_desc.h b/paddle/framework/var_desc.h
index 4fd2abe7fb215c3ac454de3e30754685111eb570..fc482c467404a6b9dfed64c43871d91d3d10c766 100644
--- a/paddle/framework/var_desc.h
+++ b/paddle/framework/var_desc.h
@@ -76,7 +76,7 @@ class VarDesc {
 
   void SetLoDLevel(int32_t lod_level);
 
-  int32_t GetLodLevel() const;
+  int32_t GetLoDLevel() const;
 
   proto::VarDesc::VarType GetType() const;
diff --git a/paddle/inference/CMakeLists.txt b/paddle/inference/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8437b2b21942ead544dab8636db1b355b7cf7bd5
--- /dev/null
+++ b/paddle/inference/CMakeLists.txt
@@ -0,0 +1,47 @@
+set(FLUID_CORE_MODULES
+    backward proto_desc paddle_memory executor prune init ${GLOB_OP_LIB})
+
+cc_library(paddle_fluid_api
+    SRCS inference.cc
+    DEPS ${FLUID_CORE_MODULES})
+
+# Merge all modules into a single static library
+cc_library(paddle_fluid DEPS paddle_fluid_api ${FLUID_CORE_MODULES})
+
+# ptools
+# just for testing; we may need to change the storage format of the
+# inference model and remove the dependency on pickle.
+# download from http://www.picklingtools.com/
+# build in the C++ sub-directory, using the command
+#     make -f Makefile.Linux libptools.so
+set(PTOOLS_LIB)
+set(PTOOLS_ROOT $ENV{PTOOLS_ROOT} CACHE PATH "Folder containing PicklingTools")
+find_path(PTOOLS_INC_DIR chooseser.h PATHS ${PTOOLS_ROOT}/C++)
+find_library(PTOOLS_SHARED_LIB NAMES ptools PATHS ${PTOOLS_ROOT}/C++)
+if(PTOOLS_INC_DIR AND PTOOLS_SHARED_LIB)
+  add_definitions(-DPADDLE_USE_PTOOLS)
+  set(PTOOLS_LIB ptools)
+  message(STATUS "Found PicklingTools: ${PTOOLS_SHARED_LIB}")
+  add_library(${PTOOLS_LIB} SHARED IMPORTED GLOBAL)
+  set_property(TARGET ${PTOOLS_LIB} PROPERTY IMPORTED_LOCATION ${PTOOLS_SHARED_LIB})
+  include_directories(${PTOOLS_ROOT}/C++)
+  include_directories(${PTOOLS_ROOT}/C++/opencontainers_1_8_5/include)
+  add_definitions(-DOC_NEW_STYLE_INCLUDES) # used in ptools
+endif()
+
+add_executable(example example.cc)
+if(APPLE)
+  set(OPTIONAL_LINK_FLAGS)
+  if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
+    set(OPTIONAL_LINK_FLAGS "-undefined dynamic_lookup")
+  endif()
+  target_link_libraries(example
+      -Wl,-force_load paddle_fluid
+      ${OPTIONAL_LINK_FLAGS}
+      ${PTOOLS_LIB})
+else()
+  target_link_libraries(example
+      -Wl,--start-group -Wl,--whole-archive paddle_fluid
+      -Wl,--no-whole-archive -Wl,--end-group
+      ${PTOOLS_LIB})
+endif()
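Note: the `-Wl,--whole-archive` / `-Wl,-force_load` flags above force the linker to keep object files that nothing references. That matters here because Paddle operators in `${GLOB_OP_LIB}` register themselves through global constructors. The pattern is sketched below with hypothetical names (this is not Paddle's actual registry code):

    #include <iostream>
    #include <map>
    #include <string>

    // Toy op registry: the static object's constructor runs at startup
    // and records the op, which is exactly what whole-archive preserves.
    std::map<std::string, int>& Registry() {
      static std::map<std::string, int> ops;
      return ops;
    }

    struct Registrar {
      explicit Registrar(const std::string& name) { Registry()[name] = 1; }
    };

    // Nothing references this object, so when it sits in a static library
    // the linker may drop its object file entirely -- unless the archive is
    // linked with --whole-archive (GNU ld) or -force_load (Apple ld).
    static Registrar feed_op_registrar("feed");

    int main() { std::cout << Registry().size() << " op(s) registered\n"; }
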
diff --git a/paddle/inference/example.cc b/paddle/inference/example.cc
new file mode 100644
index 0000000000000000000000000000000000000000..9711b20e6fb4099a2cc497029468ebd1fd0b3456
--- /dev/null
+++ b/paddle/inference/example.cc
@@ -0,0 +1,79 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <time.h>
+#include <iostream>
+#include "gflags/gflags.h"
+#include "paddle/inference/inference.h"
+
+DEFINE_string(dirname, "", "Directory of the inference model.");
+DEFINE_string(feed_var_names, "", "Names of feeding variables");
+DEFINE_string(fetch_var_names, "", "Names of fetching variables");
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  if (FLAGS_dirname.empty() || FLAGS_feed_var_names.empty() ||
+      FLAGS_fetch_var_names.empty()) {
+    // Example:
+    //   ./example --dirname=recognize_digits_mlp.inference.model
+    //             --feed_var_names="x"
+    //             --fetch_var_names="fc_2.tmp_2"
+    std::cout << "Usage: ./example --dirname=path/to/your/model "
+                 "--feed_var_names=x --fetch_var_names=y"
+              << std::endl;
+    exit(1);
+  }
+
+  std::cout << "FLAGS_dirname: " << FLAGS_dirname << std::endl;
+  std::cout << "FLAGS_feed_var_names: " << FLAGS_feed_var_names << std::endl;
+  std::cout << "FLAGS_fetch_var_names: " << FLAGS_fetch_var_names << std::endl;
+
+  std::string dirname = FLAGS_dirname;
+  std::vector<std::string> feed_var_names = {FLAGS_feed_var_names};
+  std::vector<std::string> fetch_var_names = {FLAGS_fetch_var_names};
+
+  paddle::InferenceEngine* engine = new paddle::InferenceEngine();
+  engine->LoadInferenceModel(dirname, feed_var_names, fetch_var_names);
+
+  paddle::framework::LoDTensor input;
+  srand(time(0));
+  float* input_ptr =
+      input.mutable_data<float>({1, 784}, paddle::platform::CPUPlace());
+  for (int i = 0; i < 784; ++i) {
+    input_ptr[i] = rand() / (static_cast<float>(RAND_MAX));
+  }
+
+  std::vector<paddle::framework::LoDTensor> feeds;
+  feeds.push_back(input);
+  std::vector<paddle::framework::LoDTensor> fetchs;
+  engine->Execute(feeds, fetchs);
+
+  for (size_t i = 0; i < fetchs.size(); ++i) {
+    auto dims_i = fetchs[i].dims();
+    std::cout << "dims_i:";
+    for (int j = 0; j < dims_i.size(); ++j) {
+      std::cout << " " << dims_i[j];
+    }
+    std::cout << std::endl;
+    std::cout << "result:";
+    float* output_ptr = fetchs[i].data<float>();
+    for (int j = 0; j < paddle::framework::product(dims_i); ++j) {
+      std::cout << " " << output_ptr[j];
+    }
+    std::cout << std::endl;
+  }
+
+  delete engine;
+  return 0;
+}
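Note: example.cc prints the raw fetched values. For the digit-recognition model it is fed, the fetch is a row of class probabilities; a hedged sketch of turning that into a prediction (hypothetical helper, assumes a {1, 10} float softmax output, which example.cc does not verify):

    #include "paddle/framework/lod_tensor.h"

    // Picks the most likely digit from a {1, num_classes} probability row.
    int ArgmaxDigit(const paddle::framework::LoDTensor& output) {
      const float* probs = output.data<float>();  // assumes float data
      int64_t num_classes = output.dims()[1];     // assumes 2-D {1, N} shape
      int best = 0;
      for (int64_t j = 1; j < num_classes; ++j) {
        if (probs[j] > probs[best]) best = static_cast<int>(j);
      }
      return best;
    }
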
diff --git a/paddle/inference/inference.cc b/paddle/inference/inference.cc
new file mode 100644
index 0000000000000000000000000000000000000000..48a51efcd25afeb499ab9389c2f27c8bd2515cd8
--- /dev/null
+++ b/paddle/inference/inference.cc
@@ -0,0 +1,202 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "inference.h"
+#include <fstream>
+#include "paddle/framework/executor.h"
+#include "paddle/framework/feed_fetch_method.h"
+#include "paddle/framework/init.h"
+#include "paddle/framework/scope.h"
+
+#ifdef PADDLE_USE_PTOOLS
+#include "chooseser.h"
+#endif
+
+namespace paddle {
+
+void InferenceEngine::LoadInferenceModel(
+    const std::string& dirname,
+    const std::vector<std::string>& feed_var_names,
+    const std::vector<std::string>& fetch_var_names) {
+#ifdef PADDLE_USE_PTOOLS
+  std::string model_filename = dirname + "/__model__";
+  LOG(INFO) << "Using PicklingTools, loading model from " << model_filename;
+  Val v;
+  LoadValFromFile(model_filename.c_str(), v, SERIALIZE_P0);
+  std::string program_desc_str = v["program_desc_str"];
+  LOG(INFO) << "program_desc_str's size: " << program_desc_str.size();
+// PicklingTools cannot parse the vector of strings correctly.
+#else
+  // program_desc_str:
+  // the inference model is saved by the following Python code:
+  //   inference_program = fluid.io.get_inference_program(predict)
+  //   model_filename = "recognize_digits_mlp.inference.model/inference.model"
+  //   with open(model_filename, "w") as f:
+  //       program_str = inference_program.desc.serialize_to_string()
+  //       f.write(struct.pack('q', len(program_str)))
+  //       f.write(program_str)
+  std::string model_filename = dirname + "/inference.model";
+  LOG(INFO) << "loading model from " << model_filename;
+  std::ifstream fs(model_filename, std::ios_base::binary);
+  int64_t size = 0;
+  fs.read(reinterpret_cast<char*>(&size), sizeof(int64_t));
+  LOG(INFO) << "program_desc_str's size: " << size;
+  std::string program_desc_str;
+  program_desc_str.resize(size);
+  fs.read(&program_desc_str[0], size);
+#endif
+  program_ = new framework::ProgramDesc(program_desc_str);
+  GenerateLoadProgram(dirname);
+
+  if (feed_var_names.empty() || fetch_var_names.empty()) {
+    LOG(FATAL) << "Please specify the feed_var_names and fetch_var_names.";
+  }
+  feed_var_names_ = feed_var_names;
+  fetch_var_names_ = fetch_var_names;
+  PrependFeedOp();
+  AppendFetchOp();
+}
+
+bool InferenceEngine::IsParameter(const framework::VarDesc* var) {
+  if (var->Persistable()) {
+    // There are many unreachable variables in the program
+    for (size_t i = 0; i < program_->Size(); ++i) {
+      const framework::BlockDesc& block = program_->Block(i);
+      for (auto* op : block.AllOps()) {
+        for (auto input_argument_name : op->InputArgumentNames()) {
+          if (input_argument_name == var->Name()) {
+            return true;
+          }
+        }
+      }
+    }
+  }
+  return false;
+}
+
+void InferenceEngine::GenerateLoadProgram(const std::string& dirname) {
+  framework::BlockDesc* global_block = program_->MutableBlock(0);
+
+  load_program_ = new framework::ProgramDesc();
+  framework::BlockDesc* load_block = load_program_->MutableBlock(0);
+  for (auto* var : global_block->AllVars()) {
+    if (IsParameter(var)) {
+      LOG(INFO) << "parameter's name: " << var->Name();
+
+      framework::VarDesc* new_var = load_block->Var(var->Name());
+      new_var->SetShape(var->Shape());
+      new_var->SetDataType(var->GetDataType());
+      new_var->SetType(var->GetType());
+      new_var->SetLoDLevel(var->GetLoDLevel());
+      new_var->SetPersistable(true);
+
+      // append_op
+      framework::OpDesc* op = load_block->AppendOp();
+      op->SetType("load");
+      op->SetOutput("Out", {new_var->Name()});
+      op->SetAttr("file_path", {dirname + "/" + new_var->Name()});
+      op->CheckAttrs();
+    }
+  }
+}
+
+void InferenceEngine::PrependFeedOp() {
+  if (!program_) {
+    LOG(FATAL) << "Please initialize the program_ first.";
+  }
+
+  framework::BlockDesc* global_block = program_->MutableBlock(0);
+
+  // create_var
+  framework::VarDesc* feed_var = global_block->Var("feed");
+  feed_var->SetType(framework::proto::VarDesc::FEED_MINIBATCH);
+  feed_var->SetPersistable(true);
+
+  // prepend feed_op
+  for (size_t i = 0; i < feed_var_names_.size(); ++i) {
+    std::string var_name = feed_var_names_[i];
+    LOG(INFO) << "feed var's name: " << var_name;
+
+    // prepend_op
+    framework::OpDesc* op = global_block->PrependOp();
+    op->SetType("feed");
+    op->SetInput("X", {"feed"});
+    op->SetOutput("Out", {var_name});
+    op->SetAttr("col", {static_cast<int>(i)});
+    op->CheckAttrs();
+  }
+}
+
+void InferenceEngine::AppendFetchOp() {
+  if (!program_) {
+    LOG(FATAL) << "Please initialize the program_ first.";
+  }
+
+  framework::BlockDesc* global_block = program_->MutableBlock(0);
+
+  // create_var
+  framework::VarDesc* fetch_var = global_block->Var("fetch");
+  fetch_var->SetType(framework::proto::VarDesc::FETCH_LIST);
+  fetch_var->SetPersistable(true);
+
+  // append fetch_op
+  for (size_t i = 0; i < fetch_var_names_.size(); ++i) {
+    std::string var_name = fetch_var_names_[i];
+    LOG(INFO) << "fetch var's name: " << var_name;
+
+    // append_op
+    framework::OpDesc* op = global_block->AppendOp();
+    op->SetType("fetch");
+    op->SetInput("X", {var_name});
+    op->SetOutput("Out", {"fetch"});
+    op->SetAttr("col", {static_cast<int>(i)});
+    op->CheckAttrs();
+  }
+}
+
+void InferenceEngine::Execute(const std::vector<framework::LoDTensor>& feeds,
+                              std::vector<framework::LoDTensor>& fetchs) {
+  if (!program_ || !load_program_) {
+    LOG(FATAL) << "Please initialize the program_ and load_program_ first.";
+  }
+
+  if (feeds.size() < feed_var_names_.size()) {
+    LOG(FATAL) << "Please feed " << feed_var_names_.size() << " input Tensors.";
+  }
+
+  auto* place = new platform::CPUPlace();
+  framework::InitDevices({"CPU"});
+  framework::Executor* executor = new framework::Executor(*place);
+  framework::Scope* scope = new framework::Scope();
+
+  executor->Run(*load_program_, scope, 0, true, true);
+
+  // set_feed_variable
+  for (size_t i = 0; i < feed_var_names_.size(); ++i) {
+    framework::SetFeedVariable(scope, feeds[i], "feed", i);
+  }
+
+  executor->Run(*program_, scope, 0, true, true);
+
+  // get_fetch_variable
+  fetchs.resize(fetch_var_names_.size());
+  for (size_t i = 0; i < fetch_var_names_.size(); ++i) {
+    fetchs[i] = framework::GetFetchVariable(*scope, "fetch", i);
+  }
+
+  delete place;
+  delete scope;
+  delete executor;
+}
+
+}  // namespace paddle
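Note: the non-ptools branch of LoadInferenceModel above expects `inference.model` to be a little-endian int64 byte count followed by the serialized ProgramDesc, mirroring `struct.pack('q', len(program_str))` on the Python side. A self-contained sketch of that framing, matching the reader above (function names are illustrative, not part of this PR):

    #include <cstdint>
    #include <fstream>
    #include <string>

    // Write a payload with the int64 length prefix LoadInferenceModel reads.
    void WriteSizePrefixed(const std::string& path, const std::string& payload) {
      std::ofstream fs(path, std::ios_base::binary);
      int64_t size = static_cast<int64_t>(payload.size());
      fs.write(reinterpret_cast<const char*>(&size), sizeof(int64_t));
      fs.write(payload.data(), size);
    }

    // Read it back: the length first, then exactly that many bytes.
    std::string ReadSizePrefixed(const std::string& path) {
      std::ifstream fs(path, std::ios_base::binary);
      int64_t size = 0;
      fs.read(reinterpret_cast<char*>(&size), sizeof(int64_t));
      std::string payload(size, '\0');
      fs.read(&payload[0], size);
      return payload;
    }
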
diff --git a/paddle/inference/inference.h b/paddle/inference/inference.h
new file mode 100644
index 0000000000000000000000000000000000000000..a3f3ef4b440036a0b27353cc092eed1bbf96eeb3
--- /dev/null
+++ b/paddle/inference/inference.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/framework/block_desc.h"
+#include "paddle/framework/lod_tensor.h"
+#include "paddle/framework/program_desc.h"
+
+namespace paddle {
+
+class InferenceEngine {
+public:
+  InferenceEngine() : program_(nullptr), load_program_(nullptr) {}
+  ~InferenceEngine() {
+    delete program_;
+    delete load_program_;
+  }
+
+  void LoadInferenceModel(const std::string& dirname,
+                          const std::vector<std::string>& feed_var_names,
+                          const std::vector<std::string>& fetch_var_names);
+  void Execute(const std::vector<framework::LoDTensor>& feeds,
+               std::vector<framework::LoDTensor>& fetchs);
+
+private:
+  bool IsParameter(const framework::VarDesc* var);
+  void GenerateLoadProgram(const std::string& dirname);
+  void PrependFeedOp();
+  void AppendFetchOp();
+
+private:
+  framework::ProgramDesc* program_;
+  framework::ProgramDesc* load_program_;
+  std::vector<std::string> feed_var_names_;
+  std::vector<std::string> fetch_var_names_;
+};
+
+}  // namespace paddle
diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc
index 07292d47e9c165c67fe4a30ee7d851c350beb2e0..564a3700011c1b0c294c27af4b86ab967a0f0e1e 100644
--- a/paddle/pybind/protobuf.cc
+++ b/paddle/pybind/protobuf.cc
@@ -216,7 +216,7 @@ void BindVarDsec(py::module &m) {
       .def("set_dtype", &VarDesc::SetDataType)
      .def("shape", &VarDesc::Shape, py::return_value_policy::reference)
       .def("dtype", &VarDesc::GetDataType, py::return_value_policy::reference)
-      .def("lod_level", &VarDesc::GetLodLevel)
+      .def("lod_level", &VarDesc::GetLoDLevel)
       .def("set_lod_level", &VarDesc::SetLoDLevel)
       .def("type", &VarDesc::GetType)
       .def("set_type", &VarDesc::SetType)
diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py
index c47ce82aba7fa5ac42ac26cd25fa3ebc93e96cb2..926327b70c70d250766c2640d808bb3e3516d37b 100644
--- a/python/paddle/v2/fluid/io.py
+++ b/python/paddle/v2/fluid/io.py
@@ -36,7 +36,7 @@ def save_vars(executor, dirname, main_program=None, vars=None, predicate=None):
     :param executor: executor that save variable
     :param dirname: directory path
     :param main_program: program. If vars is None, then filter all variables in this
-        program which fit `predicate`. Default g_program.
+        program which fit `predicate`. Default default_main_program.
     :param predicate: The Predicate describes a callable that returns a variable
         as a bool. If it returns true, the variables will be saved.
     :param vars: variables need to be saved. If specify vars, program & predicate
diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
index fc073f6be8563a363c0f98b9235ae267fa68562d..51bfe2973db7bd2ec4b43bb588be4c1fcfb11e74 100644
--- a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
+++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
@@ -14,6 +14,7 @@ hidden1 = fluid.layers.fc(input=image,
                           param_attr=fluid.ParamAttr(
                               regularizer=regularizer,
                               clip=fluid.clip.ClipByValue(10)))
+
 hidden2 = fluid.layers.fc(input=hidden1,
                           size=64,
                           act='relu',
@@ -73,5 +74,9 @@ for pass_id in range(PASS_NUM):
                   + " test_acc=" + str(test_pass_acc))
 
         if test_pass_acc > 0.7:
+            fluid.io.save_inference_model(
+                "./recognize_digits_mlp.inference.model/", ["x"], [predict],
+                exe)
             exit(0)
+
+exit(1)