diff --git a/paddle/capi/Arguments.cpp b/paddle/capi/Arguments.cpp
index cf773a65872fc3c0136994892197599558934da4..b983d72bb4271f4fac2e0b8818df28a99a16a2be 100644
--- a/paddle/capi/Arguments.cpp
+++ b/paddle/capi/Arguments.cpp
@@ -1,49 +1,61 @@
 #include "PaddleCAPI.h"
 #include "PaddleCAPIPrivate.h"
 
-#define cast(v) paddle::capi::cast<paddle::capi::CArguments>(v)
+using paddle::capi::cast;
+
+#define castArg(v) cast<paddle::capi::CArguments>(v)
+#define castIVec(v) cast<paddle::capi::CIVector>(v)
 
 extern "C" {
 int PDArgsCreateNone(PD_Arguments* args) {
   auto ptr = new paddle::capi::CArguments();
   *args = ptr;
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 
 int PDArgsDestroy(PD_Arguments args) {
-  if (args == nullptr) return PD_NULLPTR;
-  delete cast(args);
-  return PD_NO_ERROR;
+  if (args == nullptr) return kPD_NULLPTR;
+  delete castArg(args);
+  return kPD_NO_ERROR;
 }
 
 int PDArgsGetSize(PD_Arguments args, uint64_t* size) {
-  if (args == nullptr || size == nullptr) return PD_NULLPTR;
-  *size = cast(args)->args.size();
-  return PD_NO_ERROR;
+  if (args == nullptr || size == nullptr) return kPD_NULLPTR;
+  *size = castArg(args)->args.size();
+  return kPD_NO_ERROR;
 }
 
 int PDArgsResize(PD_Arguments args, uint64_t size) {
-  if (args == nullptr) return PD_NULLPTR;
-  cast(args)->args.resize(size);
-  return PD_NO_ERROR;
+  if (args == nullptr) return kPD_NULLPTR;
+  castArg(args)->args.resize(size);
+  return kPD_NO_ERROR;
 }
 
 int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) {
-  if (args == nullptr || mat == nullptr) return PD_NULLPTR;
+  if (args == nullptr || mat == nullptr) return kPD_NULLPTR;
   auto m = paddle::capi::cast<paddle::capi::CMatrix>(mat);
-  if (m->mat == nullptr) return PD_NULLPTR;
-  auto a = cast(args);
-  if (ID >= a->args.size()) return PD_OUT_OF_RANGE;
+  if (m->mat == nullptr) return kPD_NULLPTR;
+  auto a = castArg(args);
+  if (ID >= a->args.size()) return kPD_OUT_OF_RANGE;
   a->args[ID].value = m->mat;
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 
 int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) {
-  if (args == nullptr || mat == nullptr) return PD_NULLPTR;
+  if (args == nullptr || mat == nullptr) return kPD_NULLPTR;
   auto m = paddle::capi::cast<paddle::capi::CMatrix>(mat);
-  auto a = cast(args);
-  if (ID >= a->args.size()) return PD_OUT_OF_RANGE;
+  auto a = castArg(args);
+  if (ID >= a->args.size()) return kPD_OUT_OF_RANGE;
   m->mat = a->args[ID].value;
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
+}
+
+int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) {
+  if (args == nullptr || ids == nullptr) return kPD_NULLPTR;
+  auto iv = castIVec(ids);
+  auto a = castArg(args);
+  if (ID >= a->args.size()) return kPD_OUT_OF_RANGE;
+  iv->vec = a->args[ID].ids;
+  return kPD_NO_ERROR;
 }
 }
diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt
index 80cf2c7fa913df0a8d7fff4af792ae200c61334e..93b6b41254d2e0c3ac4032621c2839ca34a0d4a0 100644
--- a/paddle/capi/CMakeLists.txt
+++ b/paddle/capi/CMakeLists.txt
@@ -12,34 +12,11 @@ set(CAPI_PRIVATE_HEADER PaddleCAPIPrivate.h)
 
 file(GLOB CAPI_SOURCES *.cpp)
 
-add_library(paddle_capi SHARED ${CAPI_SOURCES})
+add_library(paddle_capi STATIC ${CAPI_SOURCES})
 
 target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
 
 add_dependencies(paddle_capi gen_proto_cpp)
 
-target_link_libraries(paddle_capi
-    "-Wl,-force_load"
-    paddle_gserver
-    "-Wl,-force_load"
-    paddle_function
-    paddle_pserver
-    paddle_trainer_lib
-    paddle_network
-    paddle_math
-    paddle_utils
-    paddle_parameter
-    paddle_proto
-    paddle_cuda
-    ${PROTOBUF_LIBRARY}
-    ${LIBGLOG_LIBRARY}
-    gflags
-    ${CMAKE_THREAD_LIBS_INIT}
-    ${CBLAS_LIBS}
-    ${ZLIB_LIBRARIES}
-    ${INTERAL_LIBS}
-    ${CMAKE_DL_LIBS}
-    ${PYTHON_LIBRARIES})
-
 set(PADDLE_CAPI_INC_PATH ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
diff --git a/paddle/capi/GradientMachine.cpp b/paddle/capi/GradientMachine.cpp
index 2969b5f198f766af0d24d7826ef22404fc366031..ef584ed8d0d0da94cd49b39ce5c6152c9a7e3839 100644
--- a/paddle/capi/GradientMachine.cpp
+++ b/paddle/capi/GradientMachine.cpp
@@ -27,22 +27,76 @@ extern "C" {
 int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine,
                                       void* modelConfigProtobuf,
                                       int size) {
-  if (modelConfigProtobuf == nullptr) return PD_NULLPTR;
+  if (modelConfigProtobuf == nullptr) return kPD_NULLPTR;
   paddle::ModelConfig config;
   if (!config.ParseFromArray(modelConfigProtobuf, size) ||
       !config.IsInitialized()) {
-    return PD_PROTOBUF_ERROR;
+    return kPD_PROTOBUF_ERROR;
   }
   auto ptr = new paddle::capi::CGradientMachine();
   ptr->machine.reset(paddle::GradientMachine::create(
       config, CREATE_MODE_TESTING, {paddle::PARAMETER_VALUE}));
   *machine = ptr;
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 
 int PDGradientMachineDestroy(PD_GradiemtMachine machine) {
   delete cast(machine);
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
+}
+
+int PDGradientMachineLoadParameterFromDisk(PD_GradiemtMachine machine,
+                                           const char* path) {
+  auto m = cast(machine);
+  if (m == nullptr || path == nullptr || m->machine == nullptr)
+    return kPD_NULLPTR;
+  m->machine->loadParameters(path);
+  return kPD_NO_ERROR;
+}
+
+int PDGradientMachineForward(PD_GradiemtMachine machine,
+                             PD_Arguments inArgs,
+                             PD_Arguments outArgs,
+                             bool isTrain) {
+  auto m = cast(machine);
+  auto in = paddle::capi::cast<paddle::capi::CArguments>(inArgs);
+  auto out = paddle::capi::cast<paddle::capi::CArguments>(outArgs);
+  if (m == nullptr || in == nullptr || out == nullptr || m->machine == nullptr)
+    return kPD_NULLPTR;
+  m->machine->forward(
+      in->args, &out->args, isTrain ? paddle::PASS_TRAIN : paddle::PASS_TEST);
+  return kPD_NO_ERROR;
+}
+
+int PDGradientMachineCreateSharedParam(PD_GradiemtMachine origin,
+                                       void* modelConfigProtobuf,
+                                       int size,
+                                       PD_GradiemtMachine* slave) {
+  auto o = cast(origin);
+  if (origin == nullptr || slave == nullptr || o->machine == nullptr) {
+    return kPD_NULLPTR;
+  }
+  paddle::ModelConfig config;
+  if (!config.ParseFromArray(modelConfigProtobuf, size) ||
+      !config.IsInitialized()) {
+    return kPD_PROTOBUF_ERROR;
+  }
+
+  std::unique_ptr<paddle::capi::CGradientMachine> ptr(
+      new paddle::capi::CGradientMachine());
+  auto nn = paddle::NeuralNetwork::create(config);
+  nn->init(config,
+           [&o](int paramId, paddle::Parameter* param) {
+             auto p = o->machine->getParameters()[paramId];
+             param->enableSharedType(paddle::PARAMETER_VALUE,
+                                     p->getBuf(paddle::PARAMETER_VALUE));
+           },
+           {paddle::PARAMETER_VALUE},
+           false);
+  ptr->machine.reset(nn);
+  *slave = ptr.release();
+  return kPD_NO_ERROR;
 }
 }
diff --git a/paddle/capi/Main.cpp b/paddle/capi/Main.cpp
index cc07e2ba4ef8179297f1961ea0a33c5553196c27..8cd0104be2e07f7b3093ec36a584019f33f55010 100644
--- a/paddle/capi/Main.cpp
+++ b/paddle/capi/Main.cpp
@@ -24,6 +24,6 @@ int PDInit(int argc, char** argv) {
   }
   initPaddle(argc + 1, realArgv.data());
   free(realArgv[0]);
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 }
diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp
index 71598b1714d1986109635e901ef7ce574ddb0946..dc1b4f3379d2b078bc365dd56bbaec96efa4856e 100644
--- a/paddle/capi/Matrix.cpp
+++ b/paddle/capi/Matrix.cpp
@@ -8,27 +8,27 @@ int PDMatCreate(PD_Matrix* mat, uint64_t height, uint64_t width, bool useGpu) {
   auto ptr = new paddle::capi::CMatrix();
   ptr->mat = paddle::Matrix::create(height, width, false, useGpu);
   *mat = ptr;
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 
 int PDMatCreateNone(PD_Matrix* mat) {
   auto ptr = new paddle::capi::CMatrix();
   *mat = ptr;
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 
 int PDMatDestroy(PD_Matrix mat) {
-  if (mat == nullptr) return PD_NULLPTR;
+  if (mat == nullptr) return kPD_NULLPTR;
   auto ptr = cast(mat);
   delete ptr;
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 
 int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) {
-  if (mat == nullptr) return PD_NULLPTR;
+  if (mat == nullptr) return kPD_NULLPTR;
   auto ptr = cast(mat);
-  if (ptr->mat == nullptr) return PD_NULLPTR;
-  if (rowID >= ptr->mat->getHeight()) return PD_OUT_OF_RANGE;
+  if (ptr->mat == nullptr) return kPD_NULLPTR;
+  if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE;
   paddle::real* buf = ptr->mat->getRowBuf(rowID);
   size_t width = ptr->mat->getWidth();
 #ifndef PADDLE_ONLY_CPU
@@ -36,26 +36,26 @@ int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) {
 #else
   std::copy(rowArray, rowArray + width, buf);
 #endif
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 
 int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer) {
-  if (mat == nullptr) return PD_NULLPTR;
+  if (mat == nullptr) return kPD_NULLPTR;
   auto ptr = cast(mat);
-  if (ptr->mat == nullptr) return PD_NULLPTR;
-  if (rowID >= ptr->mat->getHeight()) return PD_OUT_OF_RANGE;
+  if (ptr->mat == nullptr) return kPD_NULLPTR;
+  if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE;
   *rawRowBuffer = ptr->mat->getRowBuf(rowID);
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 
 int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width) {
-  if (mat == nullptr) return PD_NULLPTR;
+  if (mat == nullptr) return kPD_NULLPTR;
   if (height != nullptr) {
     *height = cast(mat)->mat->getHeight();
   }
   if (width != nullptr) {
     *width = cast(mat)->mat->getWidth();
   }
-  return PD_NO_ERROR;
+  return kPD_NO_ERROR;
 }
 }
diff --git a/paddle/capi/PaddleCAPI.h b/paddle/capi/PaddleCAPI.h
index 17a2498671859f703fd9c0735a82f9e2c531f205..b848603e8a345c6adb0d9c3482b7a7fa1aa585a4 100644
--- a/paddle/capi/PaddleCAPI.h
+++ b/paddle/capi/PaddleCAPI.h
@@ -8,20 +8,20 @@ extern "C" {
 #endif
 
 typedef enum {
-  PD_NO_ERROR = 0,
-  PD_NULLPTR = 1,
-  PD_OUT_OF_RANGE = 2,
-  PD_PROTOBUF_ERROR = 3,
-  PD_UNDEFINED_ERROR = -1,
+  kPD_NO_ERROR = 0,
+  kPD_NULLPTR = 1,
+  kPD_OUT_OF_RANGE = 2,
+  kPD_PROTOBUF_ERROR = 3,
+  kPD_UNDEFINED_ERROR = -1,
 } PD_Error;
 
-typedef void* PD_Vector;
+typedef void* PD_IVector;
 
-int PDVecCreate(PD_Vector* vec, uint64_t size, bool useGpu);
+int PDIVecCreateNone(PD_IVector* ivec);
 
-int PDVecDestroy(PD_Vector vec);
+int PDIVecDestroy(PD_IVector ivec);
 
-int PDVecIsSparse(PD_Vector vec, bool* isSparse);
+int PDIVectorGet(PD_IVector ivec, int** buffer);
 
 typedef void* PD_Matrix;
 
@@ -51,12 +51,27 @@ int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat);
 
 int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat);
 
+int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids);
+
 typedef void* PD_GradiemtMachine;
 
 int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine,
                                       void* modelConfigProtobuf,
                                       int size);
 
+int PDGradientMachineLoadParameterFromDisk(PD_GradiemtMachine machine,
+                                           const char* path);
+
+int PDGradientMachineForward(PD_GradiemtMachine machine,
+                             PD_Arguments inArgs,
+                             PD_Arguments outArgs,
+                             bool isTrain);
+
+int PDGradientMachineCreateSharedParam(PD_GradiemtMachine origin,
+                                       void* modelConfigProtobuf,
+                                       int size,
+                                       PD_GradiemtMachine* slave);
+
 int PDGradientMachineDestroy(PD_GradiemtMachine machine);
 
 int PDInit(int argc, char** argv);
diff --git a/paddle/capi/PaddleCAPIPrivate.h b/paddle/capi/PaddleCAPIPrivate.h
index 07e731f6cd0c287c4b738f9634b18535826aab42..1aae3cedf384bc788195504f51d92d23802fb61b 100644
--- a/paddle/capi/PaddleCAPIPrivate.h
+++ b/paddle/capi/PaddleCAPIPrivate.h
@@ -8,20 +8,40 @@ namespace paddle {
 namespace capi {
 
-struct CVector {
-  VectorPtr vec;
+enum CType { kIVECTOR = 0, kMATRIX, kARGUMENTS, kGRADIENT_MACHINE };
+
+#define STRUCT_HEADER CType type;
+
+struct CHeader {
+  STRUCT_HEADER
+};
+
+struct CIVector {
+  STRUCT_HEADER
+  IVectorPtr vec;
+
+  CIVector() : type(kIVECTOR) {}
 };
 
 struct CMatrix {
+  STRUCT_HEADER
   MatrixPtr mat;
+
+  CMatrix() : type(kMATRIX) {}
 };
 
 struct CArguments {
+  STRUCT_HEADER
   std::vector<paddle::Argument> args;
+
+  CArguments() : type(kARGUMENTS) {}
 };
 
 struct CGradientMachine {
+  STRUCT_HEADER
   paddle::GradientMachinePtr machine;
+
+  CGradientMachine() : type(kGRADIENT_MACHINE) {}
 };
 
 template <typename T>
diff --git a/paddle/capi/Vector.cpp b/paddle/capi/Vector.cpp
index 10dee7816c376eb7376d4fa50fc3b524c84a8f86..2ac795668ffc9a9d4a461acbba0c4f3c2a2fdb4d 100644
--- a/paddle/capi/Vector.cpp
+++ b/paddle/capi/Vector.cpp
@@ -1,26 +1,28 @@
 #include "PaddleCAPI.h"
 #include "PaddleCAPIPrivate.h"
 
-#define cast(v) paddle::capi::cast<paddle::capi::CVector>(v)
+using paddle::capi::cast;
+
 extern "C" {
-int PDVecCreate(PD_Vector* vec, uint64_t size, bool useGpu) {
-  auto ptr = new paddle::capi::CVector();
-  ptr->vec = paddle::Vector::create(size, useGpu);
-  *vec = ptr;
-  return PD_NO_ERROR;
+
+int PDIVecCreateNone(PD_IVector* ivec) {
+  if (ivec == nullptr) return kPD_NULLPTR;
+  auto ptr = new paddle::capi::CIVector();
+  *ivec = ptr;
+  return kPD_NO_ERROR;
 }
-int PDVecDestroy(PD_Vector vec) {
-  auto v = cast(vec);
-  v->vec.reset();
-  delete v;
-  return PD_NO_ERROR;
+
+int PDIVecDestroy(PD_IVector ivec) {
+  if (ivec == nullptr) return kPD_NULLPTR;
+  delete cast<paddle::capi::CIVector>(ivec);
+  return kPD_NO_ERROR;
 }
 
-int PDVecIsSparse(PD_Vector vec, bool* isSparse) {
-  if (isSparse == nullptr || vec == nullptr) {
-    return PD_NULLPTR;
-  }
-  *isSparse = cast(vec)->vec->isSparse();
-  return PD_NO_ERROR;
+int PDIVectorGet(PD_IVector ivec, int** buffer) {
+  if (ivec == nullptr || buffer == nullptr) return kPD_NULLPTR;
+  auto v = cast<paddle::capi::CIVector>(ivec);
+  if (v->vec == nullptr) return kPD_NULLPTR;
+  *buffer = v->vec->getData();
+  return kPD_NO_ERROR;
 }
 }
diff --git a/paddle/capi/tests/.gitignore b/paddle/capi/tests/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..7ab6be95e397fa8f0339294a00c2f057bc116792
--- /dev/null
+++ b/paddle/capi/tests/.gitignore
@@ -0,0 +1,2 @@
+w
+b
diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt
index e54a53e2935b401f9cec1d3a48f4ea127dec7960..d81453982bfcfb13cfb8e143625093548f94b624 100644
--- a/paddle/capi/tests/CMakeLists.txt
+++ b/paddle/capi/tests/CMakeLists.txt
@@ -1,35 +1,10 @@
-function(add_capi_unittest_without_exec TARGET_NAME)
-  set(with_test_main ON)
-  set(sources)
-  foreach(source_file ${ARGN})
-    if (${source_file} STREQUAL "NO_MAIN")
-      set(with_test_main OFF)
-    else()
-      list(APPEND sources ${source_file})
-    endif()
-  endforeach()
-
-  add_executable(
-    ${TARGET_NAME}
-    ${sources})
-
-  target_link_libraries(
-    ${TARGET_NAME}
-    paddle_capi
-    ${GTEST_LIBRARIES})
-
-  if (with_test_main)
-    target_link_libraries(
-      ${TARGET_NAME} paddle_test_main)
-  endif()
-  target_include_directories(${TARGET_NAME} PUBLIC ${PADDLE_CAPI_INC_PATH})
-endfunction()
-
-function(add_capi_unittest TARGET_NAME)
-  add_capi_unittest_without_exec(${TARGET_NAME} ${ARGN})
-  add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME})
-endfunction()
-add_capi_unittest(capi_test_mats test_Vector.cpp
+add_unittest(capi_test_mats test_Vector.cpp
   test_Matrix.cpp
   test_Arguments.cpp)
-add_capi_unittest(capi_test_gradientMachine NO_MAIN test_GradientMachine.cpp)
+target_include_directories(capi_test_mats PUBLIC ${PADDLE_CAPI_INC_PATH})
+target_link_libraries(capi_test_mats paddle_capi)
+add_unittest(capi_test_gradientMachine test_GradientMachine.cpp)
+
+target_include_directories(capi_test_gradientMachine PUBLIC
+  ${PADDLE_CAPI_INC_PATH})
+target_link_libraries(capi_test_gradientMachine paddle_capi)
diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/capi/tests/test_Arguments.cpp
index c74abd60d1b106c55fc145c0ac32c9d175d82623..fe9762deed9b3099e1e4fbf5cfea42420968e155 100644
--- a/paddle/capi/tests/test_Arguments.cpp
+++ b/paddle/capi/tests/test_Arguments.cpp
@@ -15,40 +15,44 @@ static std::vector<pd_real> randomBuffer(size_t bufSize) {
 
 TEST(CAPIArguments, create) {
   PD_Arguments args;
-  ASSERT_EQ(PD_NO_ERROR, PDArgsCreateNone(&args));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args));
   uint64_t size;
-  ASSERT_EQ(PD_NO_ERROR, PDArgsGetSize(args, &size));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSize(args, &size));
   ASSERT_EQ(0UL, size);
-  ASSERT_EQ(PD_NO_ERROR, PDArgsDestroy(args));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args));
 }
 
 TEST(CAPIArguments, value) {
   PD_Arguments args;
-  ASSERT_EQ(PD_NO_ERROR, PDArgsCreateNone(&args));
-  ASSERT_EQ(PD_NO_ERROR, PDArgsResize(args, 1));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1));
 
   PD_Matrix mat;
-  ASSERT_EQ(PD_NO_ERROR, PDMatCreate(&mat, 128, 64, false));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 128, 64, false));
   for (size_t i = 0; i < 128; ++i) {
     std::vector<pd_real> sampleBuf = randomBuffer(64);
     PDMatCopyToRow(mat, i, sampleBuf.data());
   }
 
-  ASSERT_EQ(PD_NO_ERROR, PDArgsSetValue(args, 0, mat));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(args, 0, mat));
 
   PD_Matrix val;
-  ASSERT_EQ(PD_NO_ERROR, PDMatCreateNone(&val));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatCreateNone(&val));
 
-  ASSERT_EQ(PD_NO_ERROR, PDArgsGetValue(args, 0, val));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(args, 0, val));
 
   for (size_t i = 0; i < 128; ++i) {
     pd_real* row1;
     pd_real* row2;
-    ASSERT_EQ(PD_NO_ERROR, PDMatGetRow(mat, i, &row1));
-    ASSERT_EQ(PD_NO_ERROR, PDMatGetRow(val, i, &row2));
+    ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, i, &row1));
+    ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(val, i, &row2));
     ASSERT_EQ(row1, row2);
   }
 
-  ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(val));
-  ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(mat));
-  ASSERT_EQ(PD_NO_ERROR, PDArgsDestroy(args));
+
+  PD_IVector ivec;
+  ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&ivec));
+  ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(val));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args));
 }
diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/capi/tests/test_GradientMachine.cpp
index f07d1e4e7294fc629dd91ee36cd7917f9aa24bc1..63fb47bd27be222cf5c4f11841000e6b315d1328 100644
--- a/paddle/capi/tests/test_GradientMachine.cpp
+++ b/paddle/capi/tests/test_GradientMachine.cpp
@@ -1,18 +1,96 @@
 #include 
+#include 
 #include 
 #include 
 #include 
+#include 
 #include "PaddleCAPI.h"
+#include "paddle/utils/ThreadLocal.h"
 
-TEST(GradientMachine, load) {
-  paddle::TrainerConfigHelper config("./vgg_16_cifar.py");
+static std::vector<pd_real> randomBuffer(size_t bufSize) {
+  auto& eng = paddle::ThreadLocalRandomEngine::get();
+  std::uniform_real_distribution<pd_real> dist(-1.0, 1.0);
+  std::vector<pd_real> retv;
+  retv.reserve(bufSize);
+  for (size_t i = 0; i < bufSize; ++i) {
+    retv.push_back(dist(eng));
+  }
+  return retv;
+}
+
+TEST(GradientMachine, testPredict) {
+  paddle::TrainerConfigHelper config("./test_predict_network.py");
   std::string buffer;
   ASSERT_TRUE(config.getModelConfig().SerializeToString(&buffer));
   PD_GradiemtMachine machine;
 
-  ASSERT_EQ(PD_NO_ERROR,
+  ASSERT_EQ(kPD_NO_ERROR,
             PDGradientMachineCreateForPredict(
                 &machine, &buffer[0], (int)buffer.size()));
+  std::unique_ptr<paddle::GradientMachine> gm(
+      paddle::GradientMachine::create(config.getModelConfig()));
+  ASSERT_NE(nullptr, gm);
+  gm->randParameters();
+  gm->saveParameters("./");
+
+  ASSERT_EQ(kPD_NO_ERROR,
+            PDGradientMachineLoadParameterFromDisk(machine, "./"));
+
+  PD_GradiemtMachine machineSlave;
+  ASSERT_EQ(kPD_NO_ERROR,
+            PDGradientMachineCreateSharedParam(
+                machine, &buffer[0], (int)buffer.size(), &machineSlave));
+  std::swap(machineSlave, machine);
+  PD_Arguments outArgs;
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&outArgs));
+
+  PD_Arguments inArgs;
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&inArgs));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(inArgs, 1));
+  PD_Matrix mat;
+  ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 1, 100, false));
+  static_assert(std::is_same<paddle::real, pd_real>::value, "");
+
+  auto data = randomBuffer(100);
+  pd_real* rowPtr;
+  ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &rowPtr));
+  memcpy(rowPtr, data.data(), data.size() * sizeof(pd_real));
+
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(inArgs, 0, mat));
+  ASSERT_EQ(kPD_NO_ERROR,
+            PDGradientMachineForward(machine, inArgs, outArgs, false));
+
+  uint64_t sz;
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSize(outArgs, &sz));
+  ASSERT_EQ(1UL, sz);
+
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(outArgs, 0, mat));
+  std::vector<paddle::Argument> paddleInArgs;
+  std::vector<paddle::Argument> paddleOutArgs;
+  paddleInArgs.resize(1);
+  paddleInArgs[0].value =
+      paddle::Matrix::create(data.data(), 1, 100, false, false);
+
+  gm->forward(paddleInArgs, &paddleOutArgs, paddle::PASS_TEST);
+
+  auto matPaddle = paddleOutArgs[0].value;
+
+  uint64_t height, width;
+  ASSERT_EQ(kPD_NO_ERROR, PDMatGetShape(mat, &height, &width));
+  ASSERT_EQ(matPaddle->getHeight(), height);
+  ASSERT_EQ(matPaddle->getWidth(), width);
+
+  ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &rowPtr));
+  for (size_t i = 0; i < width; ++i) {
+    ASSERT_NEAR(matPaddle->getData()[i], rowPtr[i], 1e-5);
+  }
+
+  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(inArgs));
+  ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(outArgs));
+  std::swap(machineSlave, machine);
+  ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineDestroy(machineSlave));
+  ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineDestroy(machine));
 }
 
 int main(int argc, char** argv) {
diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/capi/tests/test_Matrix.cpp
index 0f04a4683049abae270259d8928e7efccbc07879..97913f7229f3edc25deb3df303db2ca5f1fa4e18 100644
--- a/paddle/capi/tests/test_Matrix.cpp
+++ b/paddle/capi/tests/test_Matrix.cpp
@@ -3,31 +3,31 @@
 
 TEST(CAPIMatrix, create) {
   PD_Matrix mat;
-  ASSERT_EQ(PD_NO_ERROR, PDMatCreate(&mat, 128, 32, false));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 128, 32, false));
   std::vector<pd_real> sampleRow;
   sampleRow.resize(32);
   for (size_t i = 0; i < sampleRow.size(); ++i) {
     sampleRow[i] = 1.0 / (i + 1.0);
   }
-  ASSERT_EQ(PD_NO_ERROR, PDMatCopyToRow(mat, 0, sampleRow.data()));
-  ASSERT_EQ(PD_OUT_OF_RANGE, PDMatCopyToRow(mat, 128, sampleRow.data()));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatCopyToRow(mat, 0, sampleRow.data()));
+  ASSERT_EQ(kPD_OUT_OF_RANGE, PDMatCopyToRow(mat, 128, sampleRow.data()));
 
   pd_real* arrayPtr;
-  ASSERT_EQ(PD_NO_ERROR, PDMatGetRow(mat, 0, &arrayPtr));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &arrayPtr));
   for (size_t i = 0; i < sampleRow.size(); ++i) {
     ASSERT_NEAR(sampleRow[i], arrayPtr[i], 1e-5);
   }
 
   uint64_t height, width;
-  ASSERT_EQ(PD_NO_ERROR, PDMatGetShape(mat, &height, &width));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatGetShape(mat, &height, &width));
   ASSERT_EQ(128, height);
   ASSERT_EQ(32, width);
-  ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(mat));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat));
 }
 
 TEST(CAPIMatrix, createNone) {
   PD_Matrix mat;
-  ASSERT_EQ(PD_NO_ERROR, PDMatCreateNone(&mat));
-  ASSERT_EQ(PD_NO_ERROR, PDMatDestroy(mat));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatCreateNone(&mat));
+  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat));
 }
diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/capi/tests/test_Vector.cpp
index dbb987d440a84d1d71a59ba4df5f3dafe055e68f..907a63bc9e03d0a3aab0e5510f7b833365c14068 100644
--- a/paddle/capi/tests/test_Vector.cpp
+++ b/paddle/capi/tests/test_Vector.cpp
@@ -2,10 +2,7 @@
 #include "gtest/gtest.h"
 
 TEST(CAPIVector, create) {
-  PD_Vector tmp;
-  ASSERT_EQ(PD_NO_ERROR, PDVecCreate(&tmp, 128, false));
-  bool isSparse;
-  ASSERT_EQ(PD_NO_ERROR, PDVecIsSparse(tmp, &isSparse));
-  ASSERT_FALSE(isSparse);
-  ASSERT_EQ(PD_NO_ERROR, PDVecDestroy(tmp));
+  PD_IVector vec;
+  ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&vec));
+  ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(vec));
 }
diff --git a/paddle/capi/tests/test_predict_network.py b/paddle/capi/tests/test_predict_network.py
new file mode 100644
index 0000000000000000000000000000000000000000..82ef5cb1a70398df65ace3c802076743c3ebe341
--- /dev/null
+++ b/paddle/capi/tests/test_predict_network.py
@@ -0,0 +1,13 @@
+from paddle.trainer_config_helpers import *
+
+settings(batch_size=100)
+
+x = data_layer(name='x', size=100)
+
+y = fc_layer(
+    input=x,
+    size=100,
+    bias_attr=ParamAttr(name='b'),
+    param_attr=ParamAttr(name='w'))
+
+outputs(y)
diff --git a/paddle/capi/tests/vgg_16_cifar.py b/paddle/capi/tests/vgg_16_cifar.py
deleted file mode 120000
index 81250eefde63996c16b2d40af68e1850bef7a457..0000000000000000000000000000000000000000
--- a/paddle/capi/tests/vgg_16_cifar.py
+++ /dev/null
@@ -1 +0,0 @@
-../../../demo/image_classification/vgg_16_cifar.py
\ No newline at end of file
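
Note (not part of the patch): a minimal C sketch of how a caller could drive the prediction API introduced above, mirroring the flow exercised in test_GradientMachine.cpp. The predict() wrapper, its parameters (configProto, configSize, paramDir, input, inputWidth), and the single-row input are illustrative assumptions; it presumes PDInit() has already been called and that the caller supplies the serialized ModelConfig bytes and a parameter directory. Per-call error checking is reduced to the first two calls for brevity.

/* Illustrative usage sketch only; the helper and its arguments are
 * hypothetical and not defined anywhere in this patch. */
#include <stdbool.h>
#include <string.h>
#include "PaddleCAPI.h"

int predict(void* configProto, int configSize, const char* paramDir,
            const pd_real* input, uint64_t inputWidth) {
  PD_GradiemtMachine machine;
  if (PDGradientMachineCreateForPredict(&machine, configProto, configSize) !=
      kPD_NO_ERROR)
    return kPD_UNDEFINED_ERROR;
  if (PDGradientMachineLoadParameterFromDisk(machine, paramDir) !=
      kPD_NO_ERROR)
    return kPD_UNDEFINED_ERROR;

  PD_Arguments inArgs, outArgs;
  PDArgsCreateNone(&inArgs);
  PDArgsCreateNone(&outArgs);
  PDArgsResize(inArgs, 1);

  /* One sample of inputWidth features in a CPU matrix. */
  PD_Matrix mat;
  PDMatCreate(&mat, 1, inputWidth, false);
  pd_real* row;
  PDMatGetRow(mat, 0, &row);
  memcpy(row, input, inputWidth * sizeof(pd_real));
  PDArgsSetValue(inArgs, 0, mat);

  /* isTrain == false selects the PASS_TEST forward pass. */
  PDGradientMachineForward(machine, inArgs, outArgs, false);

  /* Wrap the first output argument and read its shape and row buffer. */
  PD_Matrix out;
  PDMatCreateNone(&out);
  PDArgsGetValue(outArgs, 0, out);
  uint64_t height, width;
  PDMatGetShape(out, &height, &width);
  PDMatGetRow(out, 0, &row); /* row now points at the prediction values. */

  PDMatDestroy(out);
  PDMatDestroy(mat);
  PDArgsDestroy(inArgs);
  PDArgsDestroy(outArgs);
  PDGradientMachineDestroy(machine);
  return kPD_NO_ERROR;
}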