提交 fdb64acc 编写于 作者: Y Yu Yang

add unittest for prediction

上级 106620ea
#include "PaddleCAPI.h"
#include "PaddleCAPIPrivate.h"
#define cast(v) paddle::capi::cast<paddle::capi::CArguments>(v)
using paddle::capi::cast;
#define castArg(v) cast<paddle::capi::CArguments>(v)
#define castIVec(v) cast<paddle::capi::CIVector>(v)
extern "C" {
/// Creates an empty PD_Arguments handle.
/// @param args [out] receives the new handle; caller must free it with
///        PDArgsDestroy.
/// @return kPD_NO_ERROR on success.
int PDArgsCreateNone(PD_Arguments* args) {
  auto ptr = new paddle::capi::CArguments();
  *args = ptr;
  // Merge artifact removed: an unreachable duplicate `return PD_NO_ERROR;`
  // (the pre-rename constant) preceded this return.
  return kPD_NO_ERROR;
}
/// Destroys a PD_Arguments handle created by PDArgsCreateNone.
/// @param args handle to free; must not be nullptr.
/// @return kPD_NULLPTR if args is null, otherwise kPD_NO_ERROR.
int PDArgsDestroy(PD_Arguments args) {
  // Merge artifact removed: the body appeared twice (old PD_* constants and
  // old `cast` macro followed by the renamed kPD_*/castArg version); only the
  // renamed version is reachable code.
  if (args == nullptr) return kPD_NULLPTR;
  delete castArg(args);
  return kPD_NO_ERROR;
}
/// Returns the number of arguments stored in `args`.
/// @param args argument container handle.
/// @param size [out] receives the element count.
/// @return kPD_NULLPTR if either pointer is null, otherwise kPD_NO_ERROR.
int PDArgsGetSize(PD_Arguments args, uint64_t* size) {
  // Merge artifact removed: duplicated old (PD_*/cast) body before the
  // renamed (kPD_*/castArg) body; everything after the first return was dead.
  if (args == nullptr || size == nullptr) return kPD_NULLPTR;
  *size = castArg(args)->args.size();
  return kPD_NO_ERROR;
}
/// Resizes the argument container to hold `size` arguments.
/// New slots are default-constructed paddle::Argument objects.
/// @return kPD_NULLPTR if args is null, otherwise kPD_NO_ERROR.
int PDArgsResize(PD_Arguments args, uint64_t size) {
  // Merge artifact removed: duplicated pre-rename body (PD_NULLPTR/cast)
  // that made the kPD_* version unreachable.
  if (args == nullptr) return kPD_NULLPTR;
  castArg(args)->args.resize(size);
  return kPD_NO_ERROR;
}
/// Sets the dense value matrix of argument slot `ID` to share the matrix
/// held by `mat` (shared_ptr assignment; no copy of the data).
/// @param args argument container; must already be resized so ID is valid.
/// @param ID   slot index.
/// @param mat  matrix handle whose inner matrix must be non-null.
/// @return kPD_NULLPTR on null handles/inner matrix, kPD_OUT_OF_RANGE if ID
///         is past the container size, otherwise kPD_NO_ERROR.
int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) {
  // Merge artifact removed: the old body was interleaved with the new one,
  // leaving a duplicate declaration of `a` (a compile error) and dead code.
  if (args == nullptr || mat == nullptr) return kPD_NULLPTR;
  auto m = paddle::capi::cast<paddle::capi::CMatrix>(mat);
  if (m->mat == nullptr) return kPD_NULLPTR;
  auto a = castArg(args);
  if (ID >= a->args.size()) return kPD_OUT_OF_RANGE;
  a->args[ID].value = m->mat;
  return kPD_NO_ERROR;
}
/// Makes `mat` share the dense value matrix of argument slot `ID`
/// (shared_ptr assignment; the returned handle aliases the same matrix).
/// @return kPD_NULLPTR on null handles, kPD_OUT_OF_RANGE if ID is past the
///         container size, otherwise kPD_NO_ERROR.
int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat) {
  // Merge artifact removed: interleaved old/new bodies redeclared `a`
  // (a compile error) and duplicated the guards and returns.
  if (args == nullptr || mat == nullptr) return kPD_NULLPTR;
  auto m = paddle::capi::cast<paddle::capi::CMatrix>(mat);
  auto a = castArg(args);
  if (ID >= a->args.size()) return kPD_OUT_OF_RANGE;
  m->mat = a->args[ID].value;
  return kPD_NO_ERROR;
}
/// Makes `ids` share the integer-id vector of argument slot `ID`
/// (shared_ptr assignment into the CIVector wrapper; no data copy).
/// @return kPD_NULLPTR on null handles, kPD_OUT_OF_RANGE if ID is past the
///         container size, otherwise kPD_NO_ERROR.
int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids) {
if (args == nullptr || ids == nullptr) return kPD_NULLPTR;
auto iv = castIVec(ids);
auto a = castArg(args);
if (ID >= a->args.size()) return kPD_OUT_OF_RANGE;
// Alias the slot's ids vector; the caller reads it through PDIVectorGet.
iv->vec = a->args[ID].ids;
return kPD_NO_ERROR;
}
}
......@@ -12,34 +12,11 @@ set(CAPI_PRIVATE_HEADER
PaddleCAPIPrivate.h)
file(GLOB CAPI_SOURCES *.cpp)
add_library(paddle_capi SHARED ${CAPI_SOURCES})
add_library(paddle_capi STATIC ${CAPI_SOURCES})
target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
add_dependencies(paddle_capi gen_proto_cpp)
target_link_libraries(paddle_capi
"-Wl,-force_load"
paddle_gserver
"-Wl,-force_load"
paddle_function
paddle_pserver
paddle_trainer_lib
paddle_network
paddle_math
paddle_utils
paddle_parameter
paddle_proto
paddle_cuda
${PROTOBUF_LIBRARY}
${LIBGLOG_LIBRARY}
gflags
${CMAKE_THREAD_LIBS_INIT}
${CBLAS_LIBS}
${ZLIB_LIBRARIES}
${INTERAL_LIBS}
${CMAKE_DL_LIBS}
${PYTHON_LIBRARIES})
set(PADDLE_CAPI_INC_PATH
${CMAKE_CURRENT_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR})
......
......@@ -27,22 +27,76 @@ extern "C" {
/// Creates a gradient machine for inference from a serialized ModelConfig.
/// @param machine [out] receives the new handle; free with
///        PDGradientMachineDestroy.
/// @param modelConfigProtobuf serialized paddle::ModelConfig bytes.
/// @param size length of the buffer in bytes.
/// @return kPD_NULLPTR on null pointers, kPD_PROTOBUF_ERROR if the config
///         does not parse or is incomplete, otherwise kPD_NO_ERROR.
int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine,
                                      void* modelConfigProtobuf,
                                      int size) {
  // Also guard `machine` itself; the original only checked the buffer.
  if (machine == nullptr || modelConfigProtobuf == nullptr) return kPD_NULLPTR;
  paddle::ModelConfig config;
  if (!config.ParseFromArray(modelConfigProtobuf, size) ||
      !config.IsInitialized()) {
    // Merge artifact removed: an unreachable duplicate
    // `return PD_PROTOBUF_ERROR;` preceded this return.
    return kPD_PROTOBUF_ERROR;
  }
  auto ptr = new paddle::capi::CGradientMachine();
  // CREATE_MODE_TESTING: inference-only machine; only VALUE parameters kept.
  ptr->machine.reset(paddle::GradientMachine::create(
      config, CREATE_MODE_TESTING, {paddle::PARAMETER_VALUE}));
  *machine = ptr;
  return kPD_NO_ERROR;
}
/// Destroys a gradient machine handle.
/// @return kPD_NULLPTR if machine is null (consistent with the other
///         *Destroy functions in this API), otherwise kPD_NO_ERROR.
int PDGradientMachineDestroy(PD_GradiemtMachine machine) {
  if (machine == nullptr) return kPD_NULLPTR;
  delete cast(machine);
  // Merge artifact removed: duplicate `return PD_NO_ERROR;` dead code.
  return kPD_NO_ERROR;
}
/// Loads the machine's parameters from files under directory `path`.
/// @param machine handle created by PDGradientMachineCreateForPredict.
/// @param path    directory containing saved parameter files
///                (as written by GradientMachine::saveParameters).
/// @return kPD_NULLPTR on null handle/path/inner machine, else kPD_NO_ERROR.
int PDGradientMachineLoadParameterFromDisk(PD_GradiemtMachine machine,
const char* path) {
auto m = cast(machine);
if (m == nullptr || path == nullptr || m->machine == nullptr)
return kPD_NULLPTR;
m->machine->loadParameters(path);
return kPD_NO_ERROR;
}
/// Runs one forward pass: inArgs -> outArgs.
/// @param machine gradient machine handle.
/// @param inArgs  input arguments (filled by the caller).
/// @param outArgs output arguments (overwritten by the pass).
/// @param isTrain true selects PASS_TRAIN, false PASS_TEST.
/// @return kPD_NULLPTR if any handle or the inner machine is null,
///         otherwise kPD_NO_ERROR.
int PDGradientMachineForward(PD_GradiemtMachine machine,
PD_Arguments inArgs,
PD_Arguments outArgs,
bool isTrain) {
auto m = cast(machine);
auto in = paddle::capi::cast<paddle::capi::CArguments>(inArgs);
auto out = paddle::capi::cast<paddle::capi::CArguments>(outArgs);
if (m == nullptr || in == nullptr || out == nullptr || m->machine == nullptr)
return kPD_NULLPTR;
m->machine->forward(
in->args, &out->args, isTrain ? paddle::PASS_TRAIN : paddle::PASS_TEST);
return kPD_NO_ERROR;
}
/// Creates a second gradient machine that SHARES parameter storage with
/// `origin` (useful for concurrent inference without duplicating weights).
/// @param origin existing machine whose parameter buffers are reused.
/// @param modelConfigProtobuf serialized paddle::ModelConfig bytes.
///        NOTE(review): not null-checked here, unlike CreateForPredict —
///        a null buffer reaches ParseFromArray; confirm intended.
/// @param size  length of the buffer in bytes.
/// @param slave [out] receives the new machine; free with
///        PDGradientMachineDestroy.
/// @return kPD_NULLPTR / kPD_PROTOBUF_ERROR / kPD_NO_ERROR.
int PDGradientMachineCreateSharedParam(PD_GradiemtMachine origin,
void* modelConfigProtobuf,
int size,
PD_GradiemtMachine* slave) {
auto o = cast(origin);
if (origin == nullptr || slave == nullptr || o->machine == nullptr) {
return kPD_NULLPTR;
}
paddle::ModelConfig config;
if (!config.ParseFromArray(modelConfigProtobuf, size) ||
!config.IsInitialized()) {
return kPD_PROTOBUF_ERROR;
}
// unique_ptr keeps the wrapper exception-safe until release() below.
std::unique_ptr<paddle::capi::CGradientMachine> ptr(
new paddle::capi::CGradientMachine());
auto nn = paddle::NeuralNetwork::create(config);
// Parameter-init callback: instead of allocating fresh buffers, each
// parameter of the new network shares the VALUE buffer of the matching
// parameter (same paramId) in the origin machine.
nn->init(config,
[&o](int paramId, paddle::Parameter* param) {
auto p = o->machine->getParameters()[paramId];
param->enableSharedType(paddle::PARAMETER_VALUE,
p->getBuf(paddle::PARAMETER_VALUE));
},
{paddle::PARAMETER_VALUE},
false);
ptr->machine.reset(nn);
*slave = ptr.release();
return kPD_NO_ERROR;
}
}
......@@ -24,6 +24,6 @@ int PDInit(int argc, char** argv) {
}
initPaddle(argc + 1, realArgv.data());
free(realArgv[0]);
return PD_NO_ERROR;
return kPD_NO_ERROR;
}
}
......@@ -8,27 +8,27 @@ int PDMatCreate(PD_Matrix* mat, uint64_t height, uint64_t width, bool useGpu) {
auto ptr = new paddle::capi::CMatrix();
ptr->mat = paddle::Matrix::create(height, width, false, useGpu);
*mat = ptr;
return PD_NO_ERROR;
return kPD_NO_ERROR;
}
/// Creates an empty matrix handle (no underlying storage yet); typically
/// filled later via PDArgsGetValue. Free with PDMatDestroy.
/// @return kPD_NO_ERROR on success.
int PDMatCreateNone(PD_Matrix* mat) {
  auto ptr = new paddle::capi::CMatrix();
  *mat = ptr;
  // Merge artifact removed: duplicate `return PD_NO_ERROR;` dead code.
  return kPD_NO_ERROR;
}
/// Destroys a matrix handle (releases the wrapper; underlying storage is
/// shared_ptr-managed and freed when the last reference drops).
/// @return kPD_NULLPTR if mat is null, otherwise kPD_NO_ERROR.
int PDMatDestroy(PD_Matrix mat) {
  // Merge artifact removed: duplicated old (PD_*) guard/return lines.
  if (mat == nullptr) return kPD_NULLPTR;
  delete cast(mat);
  return kPD_NO_ERROR;
}
int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) {
if (mat == nullptr) return PD_NULLPTR;
if (mat == nullptr) return kPD_NULLPTR;
auto ptr = cast(mat);
if (ptr->mat == nullptr) return PD_NULLPTR;
if (rowID >= ptr->mat->getHeight()) return PD_OUT_OF_RANGE;
if (ptr->mat == nullptr) return kPD_NULLPTR;
if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE;
paddle::real* buf = ptr->mat->getRowBuf(rowID);
size_t width = ptr->mat->getWidth();
#ifndef PADDLE_ONLY_CPU
......@@ -36,26 +36,26 @@ int PDMatCopyToRow(PD_Matrix mat, uint64_t rowID, pd_real* rowArray) {
#else
std::copy(rowArray, rowArray + width, buf);
#endif
return PD_NO_ERROR;
return kPD_NO_ERROR;
}
/// Returns a pointer to the internal buffer of row `rowID` (zero-copy;
/// the pointer is owned by the matrix and invalidated when it is freed).
/// @return kPD_NULLPTR on null handle/inner matrix, kPD_OUT_OF_RANGE if
///         rowID >= height, otherwise kPD_NO_ERROR.
int PDMatGetRow(PD_Matrix mat, uint64_t rowID, pd_real** rawRowBuffer) {
  // Merge artifact removed: every guard and the return appeared twice
  // (old PD_* then renamed kPD_* constants).
  if (mat == nullptr) return kPD_NULLPTR;
  auto ptr = cast(mat);
  if (ptr->mat == nullptr) return kPD_NULLPTR;
  if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE;
  *rawRowBuffer = ptr->mat->getRowBuf(rowID);
  return kPD_NO_ERROR;
}
/// Reports the matrix dimensions. Either output pointer may be null if the
/// caller only needs the other dimension.
/// @return kPD_NULLPTR on null handle or unset inner matrix,
///         otherwise kPD_NO_ERROR.
int PDMatGetShape(PD_Matrix mat, uint64_t* height, uint64_t* width) {
  if (mat == nullptr) return kPD_NULLPTR;
  auto ptr = cast(mat);
  // Robustness: a handle from PDMatCreateNone has no inner matrix yet;
  // the original dereferenced it unconditionally.
  if (ptr->mat == nullptr) return kPD_NULLPTR;
  if (height != nullptr) {
    *height = ptr->mat->getHeight();
  }
  if (width != nullptr) {
    *width = ptr->mat->getWidth();
  }
  // Merge artifact removed: duplicate `return PD_NO_ERROR;` dead code.
  return kPD_NO_ERROR;
}
}
......@@ -8,20 +8,20 @@ extern "C" {
#endif
/// Error codes returned by every function in this C API.
/// Constants use the `k` prefix per the project's renamed convention;
/// the merge artifact that kept both the old PD_* and new kPD_* enumerators
/// side by side has been collapsed to the renamed set only.
typedef enum {
  kPD_NO_ERROR = 0,        ///< success
  kPD_NULLPTR = 1,         ///< a required pointer argument was null
  kPD_OUT_OF_RANGE = 2,    ///< an index exceeded the container size
  kPD_PROTOBUF_ERROR = 3,  ///< config bytes failed to parse/validate
  kPD_UNDEFINED_ERROR = -1,
} PD_Error;
typedef void* PD_Vector;
typedef void* PD_IVector;
int PDVecCreate(PD_Vector* vec, uint64_t size, bool useGpu);
int PDIVecCreateNone(PD_IVector* ivec);
int PDVecDestroy(PD_Vector vec);
int PDIVecDestroy(PD_IVector ivec);
int PDVecIsSparse(PD_Vector vec, bool* isSparse);
int PDIVectorGet(PD_IVector ivec, int** buffer);
typedef void* PD_Matrix;
......@@ -51,12 +51,27 @@ int PDArgsSetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat);
int PDArgsGetValue(PD_Arguments args, uint64_t ID, PD_Matrix mat);
int PDArgsGetIds(PD_Arguments args, uint64_t ID, PD_IVector ids);
typedef void* PD_GradiemtMachine;
int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine,
void* modelConfigProtobuf,
int size);
int PDGradientMachineLoadParameterFromDisk(PD_GradiemtMachine machine,
const char* path);
int PDGradientMachineForward(PD_GradiemtMachine machine,
PD_Arguments inArgs,
PD_Arguments outArgs,
bool isTrain);
int PDGradientMachineCreateSharedParam(PD_GradiemtMachine origin,
void* modelConfigProtobuf,
int size,
PD_GradiemtMachine* slave);
int PDGradientMachineDestroy(PD_GradiemtMachine machine);
int PDInit(int argc, char** argv);
......
......@@ -8,20 +8,40 @@
namespace paddle {
namespace capi {
struct CVector {
VectorPtr vec;
enum CType { kIVECTOR = 0, kMATRIX, kARGUMENTS, kGRADIENT_MACHINE };
#define STRUCT_HEADER CType type;
struct CHeader {
STRUCT_HEADER
};
// C-side wrapper for an integer vector handle; `type` (from STRUCT_HEADER)
// tags the handle kind so casts can be checked at runtime.
struct CIVector {
STRUCT_HEADER
IVectorPtr vec;
CIVector() : type(kIVECTOR) {}
};
// C-side wrapper for a matrix handle; holds a shared_ptr so the underlying
// storage can be aliased across handles (see PDArgsGetValue).
struct CMatrix {
STRUCT_HEADER
MatrixPtr mat;
CMatrix() : type(kMATRIX) {}
};
// C-side wrapper for a list of paddle::Argument (network inputs/outputs).
struct CArguments {
STRUCT_HEADER
std::vector<paddle::Argument> args;
CArguments() : type(kARGUMENTS) {}
};
// C-side wrapper for a gradient machine (inference engine) handle.
struct CGradientMachine {
STRUCT_HEADER
paddle::GradientMachinePtr machine;
CGradientMachine() : type(kGRADIENT_MACHINE) {}
};
template <typename T>
......
#include "PaddleCAPI.h"
#include "PaddleCAPIPrivate.h"
#define cast(v) paddle::capi::cast<paddle::capi::CVector>(v)
using paddle::capi::cast;
extern "C" {
int PDVecCreate(PD_Vector* vec, uint64_t size, bool useGpu) {
auto ptr = new paddle::capi::CVector();
ptr->vec = paddle::Vector::create(size, useGpu);
*vec = ptr;
return PD_NO_ERROR;
int PDIVecCreateNone(PD_IVector* ivec) {
if (ivec == nullptr) return kPD_NULLPTR;
auto ptr = new paddle::capi::CIVector();
*ivec = ptr;
return kPD_NO_ERROR;
}
int PDVecDestroy(PD_Vector vec) {
auto v = cast(vec);
v->vec.reset();
delete v;
return PD_NO_ERROR;
int PDIVecDestroy(PD_IVector ivec) {
if (ivec == nullptr) return kPD_NULLPTR;
delete cast<paddle::capi::CIVector>(ivec);
return kPD_NO_ERROR;
}
int PDVecIsSparse(PD_Vector vec, bool* isSparse) {
if (isSparse == nullptr || vec == nullptr) {
return PD_NULLPTR;
}
*isSparse = cast(vec)->vec->isSparse();
return PD_NO_ERROR;
int PDIVectorGet(PD_IVector ivec, int** buffer) {
if (ivec == nullptr || buffer == nullptr) return kPD_NULLPTR;
auto v = cast<paddle::capi::CIVector>(ivec);
if (v->vec == nullptr) return kPD_NULLPTR;
*buffer = v->vec->getData();
return kPD_NO_ERROR;
}
}
# Builds a C-API unit-test executable without registering it with ctest.
# Extra args are source files; the literal token NO_MAIN suppresses linking
# paddle_test_main (for tests that provide their own main()).
function(add_capi_unittest_without_exec TARGET_NAME)
set(with_test_main ON)
set(sources)
# Partition ARGN into source files and the NO_MAIN flag.
foreach(source_file ${ARGN})
if (${source_file} STREQUAL "NO_MAIN")
set(with_test_main OFF)
else()
list(APPEND sources ${source_file})
endif()
endforeach()
add_executable(
${TARGET_NAME}
${sources})
target_link_libraries(
${TARGET_NAME}
paddle_capi
${GTEST_LIBRARIES})
# Default test main comes from paddle_test_main unless NO_MAIN was given.
if (with_test_main)
target_link_libraries(
${TARGET_NAME} paddle_test_main)
endif()
target_include_directories(${TARGET_NAME} PUBLIC ${PADDLE_CAPI_INC_PATH})
endfunction()
# Builds a C-API unit-test executable AND registers it with ctest.
function(add_capi_unittest TARGET_NAME)
add_capi_unittest_without_exec(${TARGET_NAME} ${ARGN})
add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME})
endfunction()
# Merge artifact removed: the deleted old add_unittest calls plus their
# manual target_include_directories/target_link_libraries lines were
# interleaved with the new add_capi_unittest calls, which already handle
# includes and linking internally.
add_capi_unittest(capi_test_mats test_Vector.cpp
    test_Matrix.cpp test_Arguments.cpp)

# Provides its own main(); see NO_MAIN handling above.
add_capi_unittest(capi_test_gradientMachine NO_MAIN test_GradientMachine.cpp)
......@@ -15,40 +15,44 @@ static std::vector<pd_real> randomBuffer(size_t bufSize) {
// A freshly created argument container is empty and can be destroyed.
TEST(CAPIArguments, create) {
  PD_Arguments args;
  // Merge artifact removed: each assertion appeared twice (PD_* then kPD_*),
  // which would create/destroy the handle twice.
  ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args));
  uint64_t size;
  ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSize(args, &size));
  ASSERT_EQ(0UL, size);
  ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args));
}
// Setting and getting a value matrix on an argument slot shares storage:
// the matrix read back aliases the row buffers of the matrix set.
TEST(CAPIArguments, value) {
  PD_Arguments args;
  // Merge artifact removed: duplicated PD_*/kPD_* assertion pairs that would
  // have created handles and resized the container twice.
  ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&args));
  ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(args, 1));

  // Fill a 128x64 matrix with random rows.
  PD_Matrix mat;
  ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 128, 64, false));
  for (size_t i = 0; i < 128; ++i) {
    std::vector<pd_real> sampleBuf = randomBuffer(64);
    PDMatCopyToRow(mat, i, sampleBuf.data());
  }
  ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(args, 0, mat));

  // Read the value back through an empty handle; rows must alias.
  PD_Matrix val;
  ASSERT_EQ(kPD_NO_ERROR, PDMatCreateNone(&val));
  ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(args, 0, val));
  for (size_t i = 0; i < 128; ++i) {
    pd_real* row1;
    pd_real* row2;
    ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, i, &row1));
    ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(val, i, &row2));
    ASSERT_EQ(row1, row2);  // same underlying buffer, not a copy
  }

  // IVector handles can at least round-trip create/destroy.
  PD_IVector ivec;
  ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&ivec));
  ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(ivec));

  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(val));
  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat));
  ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(args));
}
#include <gtest/gtest.h>
#include <paddle/gserver/gradientmachines/GradientMachine.h>
#include <paddle/trainer/TrainerConfigHelper.h>
#include <stdlib.h>
#include <string.h>
#include <type_traits>
#include "PaddleCAPI.h"
#include "paddle/utils/ThreadLocal.h"
TEST(GradientMachine, load) {
paddle::TrainerConfigHelper config("./vgg_16_cifar.py");
static std::vector<pd_real> randomBuffer(size_t bufSize) {
auto& eng = paddle::ThreadLocalRandomEngine::get();
std::uniform_real_distribution<pd_real> dist(-1.0, 1.0);
std::vector<pd_real> retv;
retv.reserve(bufSize);
for (size_t i = 0; i < bufSize; ++i) {
retv.push_back(dist(eng));
}
return retv;
}
// End-to-end prediction through the C API must match a natively created
// GradientMachine on the same network and parameters, and a shared-param
// slave machine must produce the same result as its origin.
TEST(GradientMachine, testPredict) {
  paddle::TrainerConfigHelper config("./test_predict_network.py");
  std::string buffer;
  ASSERT_TRUE(config.getModelConfig().SerializeToString(&buffer));
  PD_GradiemtMachine machine;

  // Merge artifact removed: a dangling duplicate `ASSERT_EQ(PD_NO_ERROR,`
  // line preceded this assertion.
  ASSERT_EQ(kPD_NO_ERROR,
            PDGradientMachineCreateForPredict(
                &machine, &buffer[0], (int)buffer.size()));

  // Reference machine built directly from the config.
  std::unique_ptr<paddle::GradientMachine> gm(
      paddle::GradientMachine::create(config.getModelConfig()));
  ASSERT_NE(nullptr, gm);
  gm->randParameters();
  gm->saveParameters("./");

  ASSERT_EQ(kPD_NO_ERROR,
            PDGradientMachineLoadParameterFromDisk(machine, "./"));

  // Run the forward pass through the parameter-sharing slave machine.
  PD_GradiemtMachine machineSlave;
  ASSERT_EQ(kPD_NO_ERROR,
            PDGradientMachineCreateSharedParam(
                machine, &buffer[0], (int)buffer.size(), &machineSlave));
  std::swap(machineSlave, machine);

  PD_Arguments outArgs;
  ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&outArgs));
  PD_Arguments inArgs;
  ASSERT_EQ(kPD_NO_ERROR, PDArgsCreateNone(&inArgs));
  ASSERT_EQ(kPD_NO_ERROR, PDArgsResize(inArgs, 1));

  // One random input row of width 100 (the network's input size).
  PD_Matrix mat;
  ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 1, 100, false));
  static_assert(std::is_same<pd_real, paddle::real>::value, "");
  auto data = randomBuffer(100);
  pd_real* rowPtr;
  ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &rowPtr));
  memcpy(rowPtr, data.data(), data.size() * sizeof(pd_real));

  ASSERT_EQ(kPD_NO_ERROR, PDArgsSetValue(inArgs, 0, mat));
  ASSERT_EQ(kPD_NO_ERROR,
            PDGradientMachineForward(machine, inArgs, outArgs, false));
  uint64_t sz;
  ASSERT_EQ(kPD_NO_ERROR, PDArgsGetSize(outArgs, &sz));
  ASSERT_EQ(1UL, sz);
  ASSERT_EQ(kPD_NO_ERROR, PDArgsGetValue(outArgs, 0, mat));

  // Reference forward pass with the same input data.
  std::vector<paddle::Argument> paddleInArgs;
  std::vector<paddle::Argument> paddleOutArgs;
  paddleInArgs.resize(1);
  paddleInArgs[0].value =
      paddle::Matrix::create(data.data(), 1, 100, false, false);
  gm->forward(paddleInArgs, &paddleOutArgs, paddle::PASS_TEST);
  auto matPaddle = paddleOutArgs[0].value;

  // Outputs must agree element-wise within float tolerance.
  uint64_t height, width;
  ASSERT_EQ(kPD_NO_ERROR, PDMatGetShape(mat, &height, &width));
  ASSERT_EQ(matPaddle->getHeight(), height);
  ASSERT_EQ(matPaddle->getWidth(), width);
  ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &rowPtr));
  for (size_t i = 0; i < width; ++i) {
    ASSERT_NEAR(matPaddle->getData()[i], rowPtr[i], 1e-5);
  }

  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat));
  ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(inArgs));
  ASSERT_EQ(kPD_NO_ERROR, PDArgsDestroy(outArgs));
  // Swap back so each handle is destroyed exactly once.
  std::swap(machineSlave, machine);
  ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineDestroy(machineSlave));
  ASSERT_EQ(kPD_NO_ERROR, PDGradientMachineDestroy(machine));
}
int main(int argc, char** argv) {
......
......@@ -3,31 +3,31 @@
// A dense matrix round-trips row data, rejects out-of-range rows, and
// reports its shape.
TEST(CAPIMatrix, create) {
  PD_Matrix mat;
  // Merge artifact removed: every assertion appeared twice (PD_* then kPD_*),
  // which would have created and destroyed the matrix twice.
  ASSERT_EQ(kPD_NO_ERROR, PDMatCreate(&mat, 128, 32, false));
  std::vector<pd_real> sampleRow;
  sampleRow.resize(32);
  for (size_t i = 0; i < sampleRow.size(); ++i) {
    sampleRow[i] = 1.0 / (i + 1.0);
  }
  ASSERT_EQ(kPD_NO_ERROR, PDMatCopyToRow(mat, 0, sampleRow.data()));
  // Row index equal to height is one past the end.
  ASSERT_EQ(kPD_OUT_OF_RANGE, PDMatCopyToRow(mat, 128, sampleRow.data()));

  pd_real* arrayPtr;
  ASSERT_EQ(kPD_NO_ERROR, PDMatGetRow(mat, 0, &arrayPtr));
  for (size_t i = 0; i < sampleRow.size(); ++i) {
    ASSERT_NEAR(sampleRow[i], arrayPtr[i], 1e-5);
  }

  uint64_t height, width;
  ASSERT_EQ(kPD_NO_ERROR, PDMatGetShape(mat, &height, &width));
  ASSERT_EQ(128UL, height);
  ASSERT_EQ(32UL, width);
  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat));
}
// An empty (storage-less) matrix handle can be created and destroyed.
TEST(CAPIMatrix, createNone) {
  PD_Matrix mat;
  // Merge artifact removed: duplicated PD_*/kPD_* assertion pairs.
  ASSERT_EQ(kPD_NO_ERROR, PDMatCreateNone(&mat));
  ASSERT_EQ(kPD_NO_ERROR, PDMatDestroy(mat));
}
......@@ -2,10 +2,7 @@
#include "gtest/gtest.h"
// An empty integer-vector handle can be created and destroyed.
TEST(CAPIVector, create) {
  // Merge artifact removed: the body of the deleted old PD_Vector test
  // (PDVecCreate/PDVecIsSparse/PDVecDestroy) was interleaved with the new
  // PD_IVector body.
  PD_IVector vec;
  ASSERT_EQ(kPD_NO_ERROR, PDIVecCreateNone(&vec));
  ASSERT_EQ(kPD_NO_ERROR, PDIVecDestroy(vec));
}
# Minimal prediction network used by test_GradientMachine: a single
# 100 -> 100 fully-connected layer over input 'x'.
from paddle.trainer_config_helpers import *

settings(batch_size=100)

x = data_layer(name='x', size=100)

# Named parameter attrs ('w', 'b') so saved parameter files have stable names
# for loadParameters in the test.
y = fc_layer(
input=x,
size=100,
bias_attr=ParamAttr(name='b'),
param_attr=ParamAttr(name='w'))

outputs(y)
../../../demo/image_classification/vgg_16_cifar.py
\ No newline at end of file
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册