提交 470bbcf9 编写于 作者: Y Yu Yang

Add example

上级 9c1c19b6
...@@ -39,7 +39,7 @@ paddle_error paddle_matrix_destroy(paddle_matrix mat) { ...@@ -39,7 +39,7 @@ paddle_error paddle_matrix_destroy(paddle_matrix mat) {
paddle_error paddle_matrix_set_row(paddle_matrix mat, paddle_error paddle_matrix_set_row(paddle_matrix mat,
uint64_t rowID, uint64_t rowID,
pd_real* rowArray) { paddle_real* rowArray) {
if (mat == nullptr) return kPD_NULLPTR; if (mat == nullptr) return kPD_NULLPTR;
auto ptr = cast(mat); auto ptr = cast(mat);
if (ptr->mat == nullptr) return kPD_NULLPTR; if (ptr->mat == nullptr) return kPD_NULLPTR;
...@@ -56,7 +56,7 @@ paddle_error paddle_matrix_set_row(paddle_matrix mat, ...@@ -56,7 +56,7 @@ paddle_error paddle_matrix_set_row(paddle_matrix mat,
paddle_error paddle_matrix_get_row(paddle_matrix mat, paddle_error paddle_matrix_get_row(paddle_matrix mat,
uint64_t rowID, uint64_t rowID,
pd_real** rawRowBuffer) { paddle_real** rawRowBuffer) {
if (mat == nullptr) return kPD_NULLPTR; if (mat == nullptr) return kPD_NULLPTR;
auto ptr = cast(mat); auto ptr = cast(mat);
if (ptr->mat == nullptr) return kPD_NULLPTR; if (ptr->mat == nullptr) return kPD_NULLPTR;
...@@ -78,3 +78,46 @@ paddle_error paddle_matrix_get_shape(paddle_matrix mat, ...@@ -78,3 +78,46 @@ paddle_error paddle_matrix_get_shape(paddle_matrix mat,
return kPD_NO_ERROR; return kPD_NO_ERROR;
} }
} }
/// Allocate a CSR-format sparse matrix wrapper.
/// A binary matrix stores no element values (paddle::NO_VALUE); otherwise
/// elements are stored as paddle_real (paddle::FLOAT_VALUE).
/// Caller owns the returned handle.
paddle_matrix paddle_matrix_create_sparse(
    uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu) {
  auto wrapper = new paddle::capi::CMatrix();
  auto valueType = isBinary ? paddle::NO_VALUE : paddle::FLOAT_VALUE;
  wrapper->mat = paddle::Matrix::createSparseMatrix(
      height, width, nnz, valueType, paddle::SPARSE_CSR, false, useGpu);
  return wrapper;
}
/**
 * Copy CSR-format data (row offsets, column indices, optional values) into a
 * sparse matrix previously created by paddle_matrix_create_sparse().
 * @param mat        target sparse matrix handle.
 * @param rowArray   CSR row-offset array (rowSize entries).
 * @param colArray   column-index array (colSize entries).
 * @param valueArray non-zero values; may be NULL iff valueSize == 0 (binary).
 * @return kPD_NULLPTR on any null handle/array, kPD_NOT_SUPPORTED if the
 *         underlying matrix is not a CPU sparse matrix, else kPD_NO_ERROR.
 */
paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat,
                                            int* rowArray,
                                            uint64_t rowSize,
                                            int* colArray,
                                            uint64_t colSize,
                                            float* valueArray,
                                            uint64_t valueSize) {
  if (mat == nullptr) return kPD_NULLPTR;
  auto ptr = cast(mat);
  if (rowArray == nullptr || colArray == nullptr ||
      (valueSize != 0 && valueArray == nullptr) || ptr->mat == nullptr) {
    return kPD_NULLPTR;
  }
  // Only the CPU sparse matrix implementation supports this copy path.
  if (auto sparseMat = dynamic_cast<paddle::CpuSparseMatrix*>(ptr->mat.get())) {
    // Construct directly from the iterator ranges. The original sized each
    // vector first and then assign()ed, value-initializing every element
    // twice for no benefit.
    std::vector<int> row(rowArray, rowArray + rowSize);
    std::vector<int> col(colArray, colArray + colSize);
    std::vector<paddle_real> val;
    if (valueSize) {
      val.assign(valueArray, valueArray + valueSize);
    }
    sparseMat->copyFrom(row, col, val);
    return kPD_NO_ERROR;
  } else {
    return kPD_NOT_SUPPORTED;
  }
}
#ifndef __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__ #ifndef __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__
#define __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__ #define __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__
typedef @PADDLE_FLOAT_TYPE@ pd_real; typedef @PADDLE_FLOAT_TYPE@ paddle_real;
// Since we only support linux and macos in compile, always use clang or // Since we only support linux and macos in compile, always use clang or
// gcc 4.8+. DLL_IMPORT/DLL_EXPORT is as simple as below. // gcc 4.8+. DLL_IMPORT/DLL_EXPORT is as simple as below.
......
...@@ -23,6 +23,7 @@ typedef enum { ...@@ -23,6 +23,7 @@ typedef enum {
kPD_NULLPTR = 1, kPD_NULLPTR = 1,
kPD_OUT_OF_RANGE = 2, kPD_OUT_OF_RANGE = 2,
kPD_PROTOBUF_ERROR = 3, kPD_PROTOBUF_ERROR = 3,
kPD_NOT_SUPPORTED = 4,
kPD_UNDEFINED_ERROR = -1, kPD_UNDEFINED_ERROR = -1,
} paddle_error; } paddle_error;
......
#ifndef __CAPI_EXAMPLE_COMMON_H__
#define __CAPI_EXAMPLE_COMMON_H__
#include <stdio.h>
#include <stdlib.h>
/**
 * @brief CHECK evaluates a paddle C-API call and terminates the process
 *        (with the error code as exit status) if it did not succeed.
 *
 * The original spliced #stmt directly into the printf format string
 * ("... %d \n" #stmt), so any '%' inside the checked expression would be
 * misinterpreted as a conversion specifier. Pass it as a %s argument instead.
 */
#define CHECK(stmt)                                                       \
  do {                                                                    \
    paddle_error __err__ = stmt;                                          \
    if (__err__ != kPD_NO_ERROR) {                                        \
      fprintf(stderr, "Invoke paddle error %d in %s\n", __err__, #stmt); \
      exit(__err__);                                                      \
    }                                                                     \
  } while (0)
/**
 * @brief read_config loads an entire file into a malloc()ed buffer.
 * @param filename   path of the file to read.
 * @param [out] size receives the file size in bytes on success.
 * @return buffer owned by the caller (release with free()), or NULL on any
 *         failure (open, seek, tell, allocation, or short read).
 */
void* read_config(const char* filename, long* size) {
  // "rb": the config is binary protobuf; text mode would corrupt it on
  // platforms that translate line endings.
  FILE* file = fopen(filename, "rb");
  if (file == NULL) return NULL;
  if (fseek(file, 0L, SEEK_END) != 0) {
    fclose(file);
    return NULL;
  }
  *size = ftell(file);
  if (*size < 0) {
    fclose(file);
    return NULL;
  }
  if (fseek(file, 0L, SEEK_SET) != 0) {
    fclose(file);
    return NULL;
  }
  // malloc(0) may legally return NULL; request at least one byte so an
  // empty file is still distinguishable from failure.
  void* buf = malloc(*size > 0 ? (size_t)(*size) : 1);
  if (buf == NULL) {
    fclose(file);
    return NULL;
  }
  // The original ignored fread's result; a short read means a truncated
  // (unusable) config, so treat it as failure.
  if (fread(buf, 1, (size_t)(*size), file) != (size_t)(*size)) {
    free(buf);
    fclose(file);
    return NULL;
  }
  fclose(file);
  return buf;
}
#endif
# cmake_minimum_required() must be called before project(); calling
# project() first configures with unset policy defaults and makes CMake
# emit a warning.
cmake_minimum_required(VERSION 2.8)
project(dense)
aux_source_directory(. SRC_LIST)
add_executable(${PROJECT_NAME} ${SRC_LIST})
set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99)
target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared)
#!/bin/bash
# Serialize trainer_config.py into binary protobuf (trainer_config.bin),
# which the C-API examples load at startup via read_config().
python -m paddle.utils.dump_config trainer_config.py '' --binary > trainer_config.bin
#include <paddle/capi.h>
#include <time.h>
#include "../common/common.h"

#define CONFIG_BIN "./trainer_config.bin"

/* Single-sample inference example: feed one random 784-dim input through
 * the network and print its 10-way output distribution. */
int main() {
  // Initialize PaddlePaddle in CPU mode.
  char* argv[] = {"--use_gpu=False"};
  CHECK(paddle_init(1, (char**)argv));

  // Read the config binary file. It is generated by `convert_protobin.sh`.
  // The original never checked for failure and then passed NULL on.
  long size;
  void* buf = read_config(CONFIG_BIN, &size);
  if (buf == NULL) {
    fprintf(stderr, "Cannot read config file %s\n", CONFIG_BIN);
    return 1;
  }

  // Create a gradient machine for inference.
  paddle_gradient_machine machine;
  CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size));
  CHECK(paddle_gradient_machine_randomize_param(machine));

  // Loading parameter. Uncomment the following line and change the directory.
  // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine,
  //                                                "./some_where_to_params"));
  paddle_arguments in_args = paddle_arguments_create_none();

  // There is only one input of this network.
  CHECK(paddle_arguments_resize(in_args, 1));

  // Create the input matrix: one sample with 784 features.
  paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1,
                                           /* size */ 784,
                                           /* useGPU */ false);
  srand(time(0));
  paddle_real* array;

  // Get the writable buffer of the first (only) row.
  CHECK(paddle_matrix_get_row(mat, 0, &array));

  // Fill the input with uniform random values in [0, 1].
  for (int i = 0; i < 784; ++i) {
    array[i] = rand() / ((float)RAND_MAX);
  }
  CHECK(paddle_arguments_set_value(in_args, 0, mat));

  paddle_arguments out_args = paddle_arguments_create_none();
  CHECK(paddle_gradient_machine_forward(machine,
                                        in_args,
                                        out_args,
                                        /* isTrain */ false));
  paddle_matrix prob = paddle_matrix_create_none();
  CHECK(paddle_arguments_value(out_args, 0, prob));

  // Print the 10-way output of the first (only) sample.
  CHECK(paddle_matrix_get_row(prob, 0, &array));
  printf("Prob: ");
  for (int i = 0; i < 10; ++i) {
    printf("%.2f ", array[i]);
  }
  printf("\n");

  // Release resources; the original example leaked all of these.
  CHECK(paddle_matrix_destroy(prob));
  CHECK(paddle_matrix_destroy(mat));
  CHECK(paddle_gradient_machine_destroy(machine));
  free(buf);
  return 0;
}
from paddle.trainer_config_helpers import *

# Input layer with 784 features -- presumably a flattened 28x28 image
# (MNIST-style); confirm against the data feeder.
img = data_layer(name='pixel', size=784)

# Fully-connected hidden layer; parameters are explicitly named so the
# C-API examples can locate/load them on disk.
hidden = fc_layer(
    input=img,
    size=200,
    param_attr=ParamAttr(name='hidden.w'),
    bias_attr=ParamAttr(name='hidden.b'))

# 10-way softmax output layer (one probability per class).
prob = fc_layer(
    input=hidden,
    size=10,
    act=SoftmaxActivation(),
    param_attr=ParamAttr(name='prob.w'),
    bias_attr=ParamAttr(name='prob.b'))
outputs(prob)
# This file is used to ignore files which are generated
# ----------------------------------------------------------------------------
*~
*.autosave
*.a
*.core
*.moc
*.o
*.obj
*.orig
*.rej
*.so
*.so.*
*_pch.h.cpp
*_resource.rc
*.qm
.#*
*.*#
core
!core/
tags
.DS_Store
.directory
*.debug
Makefile*
*.prl
*.app
moc_*.cpp
ui_*.h
qrc_*.cpp
Thumbs.db
*.res
*.rc
/.qmake.cache
/.qmake.stash
# qtcreator generated files
*.pro.user*
# xemacs temporary files
*.flc
# Vim temporary files
.*.swp
# Visual Studio generated files
*.ib_pdb_index
*.idb
*.ilk
*.pdb
*.sln
*.suo
*.vcproj
*vcproj.*.*.user
*.ncb
*.sdf
*.opensdf
*.vcxproj
*vcxproj.*
# MinGW generated files
*.Debug
*.Release
# Python byte code
*.pyc
# Binaries
# --------
*.dll
*.exe
# cmake_minimum_required() must be called before project(); calling
# project() first configures with unset policy defaults and makes CMake
# emit a warning.
cmake_minimum_required(VERSION 2.8)
project(multi_thread)
aux_source_directory(. SRC_LIST)
add_executable(${PROJECT_NAME} ${SRC_LIST})
find_package(Threads)
set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99)
target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared
                      ${CMAKE_THREAD_LIBS_INIT})
../dense/convert_protobin.sh
\ No newline at end of file
#include <paddle/capi.h>
#include <pthread.h>
#include <time.h>
#include "../common/common.h"
#define CONFIG_BIN "./trainer_config.bin"
#define NUM_THREAD 1000
#define NUM_ITER 1000
pthread_mutex_t mutex;
/* Worker body: runs NUM_ITER forward passes on its own shared-parameter
 * gradient machine and prints each output under the global mutex. */
void* thread_main(void* gm_ptr) {
  paddle_gradient_machine machine = (paddle_gradient_machine)(gm_ptr);
  for (int iter = 0; iter < NUM_ITER; ++iter) {
    paddle_arguments in_args = paddle_arguments_create_none();
    // There is only one input of this network.
    CHECK(paddle_arguments_resize(in_args, 1));
    // Create the input matrix: one sample with 784 features.
    paddle_matrix mat = paddle_matrix_create(/* sample_num */ 1,
                                             /* size */ 784,
                                             /* useGPU */ false);
    paddle_real* array;
    // Get the writable buffer of the first (only) row.
    CHECK(paddle_matrix_get_row(mat, 0, &array));
    // NOTE(review): rand() is not guaranteed thread-safe; for independent
    // per-thread streams consider rand_r() with a per-thread seed.
    for (int i = 0; i < 784; ++i) {
      array[i] = rand() / ((float)RAND_MAX);
    }
    CHECK(paddle_arguments_set_value(in_args, 0, mat));
    paddle_arguments out_args = paddle_arguments_create_none();
    CHECK(paddle_gradient_machine_forward(machine,
                                          in_args,
                                          out_args,
                                          /* isTrain */ false));
    paddle_matrix prob = paddle_matrix_create_none();
    CHECK(paddle_arguments_value(out_args, 0, prob));
    CHECK(paddle_matrix_get_row(prob, 0, &array));
    // Serialize printing so lines from different threads do not interleave.
    pthread_mutex_lock(&mutex);
    printf("Prob: ");
    for (int i = 0; i < 10; ++i) {
      printf("%.2f ", array[i]);
    }
    printf("\n");
    pthread_mutex_unlock(&mutex);
    // The original leaked both matrices on every one of the
    // NUM_THREAD * NUM_ITER iterations.
    CHECK(paddle_matrix_destroy(prob));
    CHECK(paddle_matrix_destroy(mat));
    // TODO(review): in_args/out_args are still leaked; release them once an
    // arguments-destroy API is available.
  }
  CHECK(paddle_gradient_machine_destroy(machine));
  return NULL;
}
/* Spawn NUM_THREAD workers, each with a gradient machine that shares the
 * master machine's parameters, then wait for all of them. */
int main() {
  // Initialize PaddlePaddle in CPU mode.
  char* argv[] = {"--use_gpu=False"};
  CHECK(paddle_init(1, (char**)argv));

  // Read the config binary file. It is generated by `convert_protobin.sh`.
  // The original never checked for failure and then passed NULL on.
  long size;
  void* buf = read_config(CONFIG_BIN, &size);
  if (buf == NULL) {
    fprintf(stderr, "Cannot read config file %s\n", CONFIG_BIN);
    return 1;
  }

  // Create the master gradient machine whose parameters the workers share.
  paddle_gradient_machine machine;
  CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size));
  CHECK(paddle_gradient_machine_randomize_param(machine));

  // Loading parameter. Uncomment the following line and change the directory.
  // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine,
  //                                                "./some_where_to_params"));
  srand(time(0));
  pthread_mutex_init(&mutex, NULL);

  pthread_t threads[NUM_THREAD];
  for (int i = 0; i < NUM_THREAD; ++i) {
    paddle_gradient_machine thread_local_machine;
    CHECK(paddle_gradient_machine_create_shared_param(
        machine, buf, size, &thread_local_machine));
    // Abort on spawn failure instead of later joining a garbage handle.
    if (pthread_create(&threads[i], NULL, thread_main, thread_local_machine) !=
        0) {
      fprintf(stderr, "pthread_create failed for thread %d\n", i);
      return 1;
    }
  }
  for (int i = 0; i < NUM_THREAD; ++i) {
    pthread_join(threads[i], NULL);
  }
  pthread_mutex_destroy(&mutex);

  // Workers destroy their shared machines; release the master and the
  // config buffer here (both leaked in the original).
  CHECK(paddle_gradient_machine_destroy(machine));
  free(buf);
  return 0;
}
../dense/trainer_config.py
\ No newline at end of file
# This file is used to ignore files which are generated
# ----------------------------------------------------------------------------
*~
*.autosave
*.a
*.core
*.moc
*.o
*.obj
*.orig
*.rej
*.so
*.so.*
*_pch.h.cpp
*_resource.rc
*.qm
.#*
*.*#
core
!core/
tags
.DS_Store
.directory
*.debug
Makefile*
*.prl
*.app
moc_*.cpp
ui_*.h
qrc_*.cpp
Thumbs.db
*.res
*.rc
/.qmake.cache
/.qmake.stash
# qtcreator generated files
*.pro.user*
# xemacs temporary files
*.flc
# Vim temporary files
.*.swp
# Visual Studio generated files
*.ib_pdb_index
*.idb
*.ilk
*.pdb
*.sln
*.suo
*.vcproj
*vcproj.*.*.user
*.ncb
*.sdf
*.opensdf
*.vcxproj
*vcxproj.*
# MinGW generated files
*.Debug
*.Release
# Python byte code
*.pyc
# Binaries
# --------
*.dll
*.exe
# cmake_minimum_required() must be called before project(); calling
# project() first configures with unset policy defaults and makes CMake
# emit a warning.
cmake_minimum_required(VERSION 2.8)
project(sparse_binary)
aux_source_directory(. SRC_LIST)
add_executable(${PROJECT_NAME} ${SRC_LIST})
find_package(Threads)
set_property(TARGET ${PROJECT_NAME} PROPERTY C_STANDARD 99)
target_link_libraries(${PROJECT_NAME} -lpaddle_capi_shared)
../dense/convert_protobin.sh
\ No newline at end of file
#include <paddle/capi.h>
#include <time.h>
#include "../common/common.h"

#define CONFIG_BIN "./trainer_config.bin"

/* Sparse-input inference example: feed a single binary sparse sample
 * (3 active features out of 784) and print the 10-way output. */
int main() {
  // Initialize PaddlePaddle in CPU mode.
  char* argv[] = {"--use_gpu=False"};
  CHECK(paddle_init(1, (char**)argv));

  // Read the config binary file. It is generated by `convert_protobin.sh`.
  // The original never checked for failure and then passed NULL on.
  long size;
  void* buf = read_config(CONFIG_BIN, &size);
  if (buf == NULL) {
    fprintf(stderr, "Cannot read config file %s\n", CONFIG_BIN);
    return 1;
  }

  // Create a gradient machine for inference.
  paddle_gradient_machine machine;
  CHECK(paddle_gradient_machine_create_for_inference(&machine, buf, (int)size));
  CHECK(paddle_gradient_machine_randomize_param(machine));

  // Loading parameter. Uncomment the following line and change the directory.
  // CHECK(paddle_gradient_machine_load_parameter_from_disk(machine,
  //                                                "./some_where_to_params"));
  paddle_arguments in_args = paddle_arguments_create_none();

  // There is only one input of this network.
  CHECK(paddle_arguments_resize(in_args, 1));

  // Create a binary sparse input: 1 sample x 784 features, 3 non-zeros.
  paddle_matrix mat = paddle_matrix_create_sparse(1, 784, 3, true, false);
  srand(time(0));
  paddle_real* array;

  // CSR layout: rowBuf[i]..rowBuf[i+1] is the colBuf slice of sample i.
  int colBuf[] = {9, 93, 109};
  int rowBuf[] = {0, sizeof(colBuf) / sizeof(int)};
  // Binary matrix: no value array (NULL pointer, zero length).
  CHECK(paddle_matrix_sparse_copy_from(mat,
                                       rowBuf,
                                       sizeof(rowBuf) / sizeof(int),
                                       colBuf,
                                       sizeof(colBuf) / sizeof(int),
                                       NULL,
                                       0));
  CHECK(paddle_arguments_set_value(in_args, 0, mat));

  paddle_arguments out_args = paddle_arguments_create_none();
  CHECK(paddle_gradient_machine_forward(machine,
                                        in_args,
                                        out_args,
                                        /* isTrain */ false));
  paddle_matrix prob = paddle_matrix_create_none();
  CHECK(paddle_arguments_value(out_args, 0, prob));

  // Print the 10-way output of the first (only) sample.
  CHECK(paddle_matrix_get_row(prob, 0, &array));
  printf("Prob: ");
  for (int i = 0; i < 10; ++i) {
    printf("%.2f ", array[i]);
  }
  printf("\n");

  // Release resources; the original example leaked all of these.
  CHECK(paddle_matrix_destroy(prob));
  CHECK(paddle_matrix_destroy(mat));
  CHECK(paddle_gradient_machine_destroy(machine));
  free(buf);
  return 0;
}
../dense/trainer_config.py
\ No newline at end of file
...@@ -113,3 +113,11 @@ paddle_error paddle_gradient_machine_create_shared_param( ...@@ -113,3 +113,11 @@ paddle_error paddle_gradient_machine_create_shared_param(
return kPD_NO_ERROR; return kPD_NO_ERROR;
} }
} }
/// Re-randomize all parameters of `machine`.
/// Returns kPD_NULLPTR when the handle (or the machine it wraps) is null,
/// kPD_NO_ERROR otherwise.
paddle_error paddle_gradient_machine_randomize_param(
    paddle_gradient_machine machine) {
  auto wrapper = cast(machine);
  if (wrapper == nullptr) return kPD_NULLPTR;
  if (wrapper->machine == nullptr) return kPD_NULLPTR;
  wrapper->machine->randParameters();
  return kPD_NO_ERROR;
}
...@@ -74,6 +74,9 @@ paddle_gradient_machine_create_shared_param(paddle_gradient_machine origin, ...@@ -74,6 +74,9 @@ paddle_gradient_machine_create_shared_param(paddle_gradient_machine origin,
int size, int size,
paddle_gradient_machine* slave); paddle_gradient_machine* slave);
PD_API paddle_error
paddle_gradient_machine_randomize_param(paddle_gradient_machine machine);
/** /**
* @brief Destroy a gradient machine * @brief Destroy a gradient machine
* @param machine that need to destroy * @param machine that need to destroy
......
...@@ -15,6 +15,7 @@ limitations under the License. */ ...@@ -15,6 +15,7 @@ limitations under the License. */
#ifndef __PADDLE_CAPI_MATRIX_H__ #ifndef __PADDLE_CAPI_MATRIX_H__
#define __PADDLE_CAPI_MATRIX_H__ #define __PADDLE_CAPI_MATRIX_H__
#include <stdbool.h>
#include <stdint.h> #include <stdint.h>
#include "config.h" #include "config.h"
#include "error.h" #include "error.h"
...@@ -39,6 +40,18 @@ PD_API paddle_matrix paddle_matrix_create(uint64_t height, ...@@ -39,6 +40,18 @@ PD_API paddle_matrix paddle_matrix_create(uint64_t height,
uint64_t width, uint64_t width,
bool useGpu); bool useGpu);
/**
* @brief paddle_matrix_create_sparse Create a sparse matrix.
* @param height the matrix height.
* @param width the matrix width.
* @param nnz the number of non-zero elements.
* @param isBinary is binary (either 1 or 0 in matrix) or not.
* @param useGpu is using GPU or not.
* @return paddle_matrix.
*/
PD_API paddle_matrix paddle_matrix_create_sparse(
uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu);
/** /**
* @brief paddle_matrix_destroy Destroy a matrix. * @brief paddle_matrix_destroy Destroy a matrix.
* @param mat * @param mat
...@@ -55,7 +68,7 @@ PD_API paddle_error paddle_matrix_destroy(paddle_matrix mat); ...@@ -55,7 +68,7 @@ PD_API paddle_error paddle_matrix_destroy(paddle_matrix mat);
*/ */
PD_API paddle_error paddle_matrix_set_row(paddle_matrix mat, PD_API paddle_error paddle_matrix_set_row(paddle_matrix mat,
uint64_t rowID, uint64_t rowID,
pd_real* rowArray); paddle_real* rowArray);
/** /**
* @brief PDMatGetRow Get raw row buffer from matrix * @brief PDMatGetRow Get raw row buffer from matrix
...@@ -66,7 +79,7 @@ PD_API paddle_error paddle_matrix_set_row(paddle_matrix mat, ...@@ -66,7 +79,7 @@ PD_API paddle_error paddle_matrix_set_row(paddle_matrix mat,
*/ */
PD_API paddle_error paddle_matrix_get_row(paddle_matrix mat, PD_API paddle_error paddle_matrix_get_row(paddle_matrix mat,
uint64_t rowID, uint64_t rowID,
pd_real** rawRowBuffer); paddle_real** rawRowBuffer);
/** /**
* @brief PDMatCreateNone Create None Matrix * @brief PDMatCreateNone Create None Matrix
...@@ -85,6 +98,27 @@ PD_API paddle_error paddle_matrix_get_shape(paddle_matrix mat, ...@@ -85,6 +98,27 @@ PD_API paddle_error paddle_matrix_get_shape(paddle_matrix mat,
uint64_t* height, uint64_t* height,
uint64_t* width); uint64_t* width);
/**
* @brief paddle_matrix_sparse_copy_from Copy from a CSR format matrix
* @param [out] mat output matrix
* @param [in] rowArray row array. The array slices in column array.
* @param [in] rowSize length of row array.
* @param [in] colArray the column array. It means the non-zero element indices
* in each row.
* @param [in] colSize length of column array.
* @param [in] valueArray the value array. It means the non-zero elemnt values.
* NULL if the matrix is binary.
* @param [in] valueSize length of value array. Zero if the matrix is binary.
* @return paddle_error
*/
PD_API paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat,
int* rowArray,
uint64_t rowSize,
int* colArray,
uint64_t colSize,
float* valueArray,
uint64_t valueSize);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
......
...@@ -17,10 +17,10 @@ limitations under the License. */ ...@@ -17,10 +17,10 @@ limitations under the License. */
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "paddle/utils/ThreadLocal.h" #include "paddle/utils/ThreadLocal.h"
static std::vector<pd_real> randomBuffer(size_t bufSize) { static std::vector<paddle_real> randomBuffer(size_t bufSize) {
auto& eng = paddle::ThreadLocalRandomEngine::get(); auto& eng = paddle::ThreadLocalRandomEngine::get();
std::uniform_real_distribution<pd_real> dist(-1.0, 1.0); std::uniform_real_distribution<paddle_real> dist(-1.0, 1.0);
std::vector<pd_real> retv; std::vector<paddle_real> retv;
retv.reserve(bufSize); retv.reserve(bufSize);
for (size_t i = 0; i < bufSize; ++i) { for (size_t i = 0; i < bufSize; ++i) {
retv.push_back(dist(eng)); retv.push_back(dist(eng));
...@@ -42,7 +42,7 @@ TEST(CAPIArguments, value) { ...@@ -42,7 +42,7 @@ TEST(CAPIArguments, value) {
paddle_matrix mat = paddle_matrix_create(128, 64, false); paddle_matrix mat = paddle_matrix_create(128, 64, false);
for (size_t i = 0; i < 128; ++i) { for (size_t i = 0; i < 128; ++i) {
std::vector<pd_real> sampleBuf = randomBuffer(64); std::vector<paddle_real> sampleBuf = randomBuffer(64);
paddle_matrix_set_row(mat, i, sampleBuf.data()); paddle_matrix_set_row(mat, i, sampleBuf.data());
} }
ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_value(args, 0, mat)); ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_value(args, 0, mat));
...@@ -52,8 +52,8 @@ TEST(CAPIArguments, value) { ...@@ -52,8 +52,8 @@ TEST(CAPIArguments, value) {
ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_value(args, 0, val)); ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_value(args, 0, val));
for (size_t i = 0; i < 128; ++i) { for (size_t i = 0; i < 128; ++i) {
pd_real* row1; paddle_real* row1;
pd_real* row2; paddle_real* row2;
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, i, &row1)); ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, i, &row1));
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(val, i, &row2)); ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(val, i, &row2));
......
...@@ -21,10 +21,10 @@ limitations under the License. */ ...@@ -21,10 +21,10 @@ limitations under the License. */
#include "capi.h" #include "capi.h"
#include "paddle/utils/ThreadLocal.h" #include "paddle/utils/ThreadLocal.h"
static std::vector<pd_real> randomBuffer(size_t bufSize) { static std::vector<paddle_real> randomBuffer(size_t bufSize) {
auto& eng = paddle::ThreadLocalRandomEngine::get(); auto& eng = paddle::ThreadLocalRandomEngine::get();
std::uniform_real_distribution<pd_real> dist(-1.0, 1.0); std::uniform_real_distribution<paddle_real> dist(-1.0, 1.0);
std::vector<pd_real> retv; std::vector<paddle_real> retv;
retv.reserve(bufSize); retv.reserve(bufSize);
for (size_t i = 0; i < bufSize; ++i) { for (size_t i = 0; i < bufSize; ++i) {
retv.push_back(dist(eng)); retv.push_back(dist(eng));
...@@ -60,12 +60,12 @@ TEST(GradientMachine, testPredict) { ...@@ -60,12 +60,12 @@ TEST(GradientMachine, testPredict) {
paddle_arguments inArgs = paddle_arguments_create_none(); paddle_arguments inArgs = paddle_arguments_create_none();
ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_resize(inArgs, 1)); ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_resize(inArgs, 1));
paddle_matrix mat = paddle_matrix_create(1, 100, false); paddle_matrix mat = paddle_matrix_create(1, 100, false);
static_assert(std::is_same<pd_real, paddle::real>::value, ""); static_assert(std::is_same<paddle_real, paddle::real>::value, "");
auto data = randomBuffer(100); auto data = randomBuffer(100);
pd_real* rowPtr; paddle_real* rowPtr;
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &rowPtr)); ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &rowPtr));
memcpy(rowPtr, data.data(), data.size() * sizeof(pd_real)); memcpy(rowPtr, data.data(), data.size() * sizeof(paddle_real));
ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_value(inArgs, 0, mat)); ASSERT_EQ(kPD_NO_ERROR, paddle_arguments_set_value(inArgs, 0, mat));
ASSERT_EQ(kPD_NO_ERROR, ASSERT_EQ(kPD_NO_ERROR,
......
...@@ -17,7 +17,7 @@ limitations under the License. */ ...@@ -17,7 +17,7 @@ limitations under the License. */
TEST(CAPIMatrix, create) { TEST(CAPIMatrix, create) {
paddle_matrix mat = paddle_matrix_create(128, 32, false); paddle_matrix mat = paddle_matrix_create(128, 32, false);
std::vector<pd_real> sampleRow; std::vector<paddle_real> sampleRow;
sampleRow.resize(32); sampleRow.resize(32);
for (size_t i = 0; i < sampleRow.size(); ++i) { for (size_t i = 0; i < sampleRow.size(); ++i) {
sampleRow[i] = 1.0 / (i + 1.0); sampleRow[i] = 1.0 / (i + 1.0);
...@@ -26,7 +26,7 @@ TEST(CAPIMatrix, create) { ...@@ -26,7 +26,7 @@ TEST(CAPIMatrix, create) {
ASSERT_EQ(kPD_OUT_OF_RANGE, ASSERT_EQ(kPD_OUT_OF_RANGE,
paddle_matrix_set_row(mat, 128, sampleRow.data())); paddle_matrix_set_row(mat, 128, sampleRow.data()));
pd_real* arrayPtr; paddle_real* arrayPtr;
ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &arrayPtr)); ASSERT_EQ(kPD_NO_ERROR, paddle_matrix_get_row(mat, 0, &arrayPtr));
for (size_t i = 0; i < sampleRow.size(); ++i) { for (size_t i = 0; i < sampleRow.size(); ++i) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册