Commit 81da8549 authored by: Tao Luo

remove legacy C++ code

Parent commit: 958ca2c7
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#include "paddle/legacy/parameter/Argument.h"
// Number of output slots currently held by this Arguments container.
size_t Arguments::getSlotNum() const { return m->outputs.size(); }
// Factory: heap-allocates an Arguments with `slotNum` default-constructed
// slots. Ownership passes to the caller (SWIG %newobject handles GC).
Arguments* Arguments::createArguments(size_t slotNum) {
auto args = new Arguments();
args->m->outputs.resize(slotNum);
return args;
}
// Grows or shrinks the slot list; new slots are default-constructed.
void Arguments::resize(size_t slotNum) { m->outputs.resize(slotNum); }
Arguments::Arguments() : m(new ArgumentsPrivate()) {}
Arguments::~Arguments() { delete m; }
// Wraps an existing std::vector<paddle::Argument>, passed as void* to keep
// paddle types out of the public API header. The vector is copied.
Arguments* Arguments::createByPaddleArgumentVector(void* ptr) {
auto p = (std::vector<paddle::Argument>*)(ptr);
auto args = new Arguments();
args->m->outputs = *p;
return args;
}
// Wraps a single paddle::Argument (copied) as a one-slot Arguments.
Arguments* Arguments::createByPaddleArgument(const void* ptr) {
auto p = (paddle::Argument*)(ptr);
auto args = new Arguments();
args->m->outputs.push_back(*p);
return args;
}
// Each getter below wraps a field of the idx-th paddle::Argument in a new
// API object sharing the underlying buffer; getArg() throws RangeError when
// idx is out of range. Callers own the returned wrapper objects.
Matrix* Arguments::getSlotValue(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return Matrix::createByPaddleMatrixPtr(&a.value);
}
Matrix* Arguments::getSlotGrad(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return Matrix::createByPaddleMatrixPtr(&a.grad);
}
IVector* Arguments::getSlotIds(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return IVector::createByPaddleVectorPtr(&a.ids);
}
Matrix* Arguments::getSlotIn(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return Matrix::createByPaddleMatrixPtr(&a.in);
}
// Each setter stores the caller's shared matrix/vector into the idx-th
// Argument; the buffer is shared, not copied. getArg() throws RangeError
// when idx is out of range.
void Arguments::setSlotValue(size_t idx, Matrix* mat) throw(RangeError) {
auto& a = m->getArg(idx);
a.value = m->cast<paddle::Matrix>(mat->getSharedPtr());
}
void Arguments::setSlotGrad(size_t idx, Matrix* mat) throw(RangeError) {
auto& a = m->getArg(idx);
a.grad = m->cast<paddle::Matrix>(mat->getSharedPtr());
}
void Arguments::setSlotIn(size_t idx, Matrix* mat) throw(RangeError) {
auto& a = m->getArg(idx);
a.in = m->cast<paddle::Matrix>(mat->getSharedPtr());
}
void Arguments::setSlotIds(size_t idx, IVector* vec) throw(RangeError) {
auto& a = m->getArg(idx);
auto& v = m->cast<paddle::IVector>(vec->getSharedPtr());
a.ids = v;
}
// Copies *src into *dest when both pointers are set; when only src is set,
// dest simply aliases src. A null src leaves dest untouched.
template <typename T1>
static inline void doCopyFromSafely(std::shared_ptr<T1>& dest,
                                    std::shared_ptr<T1>& src) {
  if (!src) {
    return;  // nothing to copy from
  }
  if (!dest) {
    dest = src;  // no destination buffer yet: share the source
    return;
  }
  dest->copyFrom(*src);
}
// Returns the sequence start positions of slot idx as an IVector view, or
// nullptr when the slot has no sequence info. getMutableVector(false)
// presumably selects the CPU-side vector — TODO confirm against
// ICpuGpuVector semantics.
IVector* Arguments::getSlotSequenceStartPositions(size_t idx) const
throw(RangeError) {
auto& a = m->getArg(idx);
if (a.sequenceStartPositions) {
return IVector::createByPaddleVectorPtr(
&a.sequenceStartPositions->getMutableVector(false));
} else {
return nullptr;
}
}
// Same as above for nested (sub-)sequence start positions.
IVector* Arguments::getSlotSubSequenceStartPositions(size_t idx) const
throw(RangeError) {
auto& a = m->getArg(idx);
if (a.subSequenceStartPositions) {
return IVector::createByPaddleVectorPtr(
&a.subSequenceStartPositions->getMutableVector(false));
} else {
return nullptr;
}
}
// Installs the given vector as the slot's sequence start positions, wrapping
// it in a fresh ICpuGpuVector. The underlying buffer is shared with `vec`.
void Arguments::setSlotSequenceStartPositions(size_t idx,
IVector* vec) throw(RangeError) {
auto& a = m->getArg(idx);
auto& v = m->cast<paddle::IVector>(vec->getSharedPtr());
a.sequenceStartPositions = std::make_shared<paddle::ICpuGpuVector>(v);
}
// Same as above for nested (sub-)sequence start positions.
void Arguments::setSlotSubSequenceStartPositions(
size_t idx, IVector* vec) throw(RangeError) {
auto& a = m->getArg(idx);
auto& v = m->cast<paddle::IVector>(vec->getSharedPtr());
a.subSequenceStartPositions = std::make_shared<paddle::ICpuGpuVector>(v);
}
// Accessors for the per-slot CPU sequence-dimension vector.
IVector* Arguments::getSlotSequenceDim(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return IVector::createByPaddleVectorPtr(&a.cpuSequenceDims);
}
void Arguments::setSlotSequenceDim(size_t idx, IVector* vec) throw(RangeError) {
auto& a = m->getArg(idx);
a.cpuSequenceDims = m->cast<paddle::IVector>(vec->getSharedPtr());
}
// Sums over all held paddle Arguments (delegates to paddle::Argument::sum).
float Arguments::sum() const { return paddle::Argument::sum(m->outputs); }
// Batch size of slot idx; throws RangeError via getArg() when out of range.
int64_t Arguments::getBatchSize(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return a.getBatchSize();
}
// Frame height/width accessors forward straight to paddle::Argument.
void Arguments::setSlotFrameHeight(size_t idx, size_t h) throw(RangeError) {
auto& a = m->getArg(idx);
a.setFrameHeight(h);
}
void Arguments::setSlotFrameWidth(size_t idx, size_t w) throw(RangeError) {
auto& a = m->getArg(idx);
a.setFrameWidth(w);
}
size_t Arguments::getSlotFrameHeight(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return a.getFrameHeight();
}
size_t Arguments::getSlotFrameWidth(size_t idx) const throw(RangeError) {
auto& a = m->getArg(idx);
return a.getFrameWidth();
}
// Escape hatch: exposes the raw std::vector<paddle::Argument>* as void* so
// other wrapper classes (e.g. GradientMachine) can cast it back.
void* Arguments::getInternalArgumentsPtr() const { return &m->outputs; }
# Translation units of the legacy C++ API wrapper library.
set(API_SOURCES
Arguments.cpp
ConfigParser.cpp
Evaluator.cpp
GradientMachine.cpp
Matrix.cpp
Parameter.cpp
ParameterOptimizer.cpp
ParameterUpdater.cpp
SequenceGenerator.cpp
Trainer.cpp
Util.cpp
Vector.cpp)
# Public headers consumed by the SWIG wrapper.
set(API_HEADER
PaddleAPI.h
Internal.h)
# Build the wrapper as a static library.
add_library(paddle_api STATIC ${API_SOURCES})
# Generated protobuf headers and the trainer lib must be built first.
add_dependencies(paddle_api paddle_proto paddle_trainer_lib)
# Configure SWIG for generating the Python binding module.
INCLUDE(${SWIG_USE_FILE})
INCLUDE_DIRECTORIES(${PADDLE_SOURCE_DIR}/paddle)
FILE(GLOB PY_PADDLE_PYTHON_FILES ${PADDLE_SOURCE_DIR}/paddle/py_paddle/*.py)
# Treat the interface file as C++ rather than C.
SET_SOURCE_FILES_PROPERTIES(Paddle.i PROPERTIES CPLUSPLUS ON)
# Warning suppressions needed to compile SWIG-generated code cleanly;
# safe_set_cxxflag adds each flag only when the compiler supports it.
SET(SWIG_NEED_FLAGS
-ftls-model=global-dynamic
-Wno-parentheses-equality
-Wno-self-assign
-Wno-maybe-uninitialized
-Wno-missing-field-initializers)
FOREACH(flag ${SWIG_NEED_FLAGS})
safe_set_cxxflag(SWIG_CXX_FLAGS ${flag})
ENDFOREACH()
SET(CMAKE_SWIG_OUTDIR ${CMAKE_CURRENT_BINARY_DIR})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SWIG_CXX_FLAGS}")
# Libraries the generated module depends on at build time.
SET(SWIG_MODULE_swig_paddle_EXTRA_DEPS
paddle_parameter
paddle_function
paddle_math
paddle_utils
paddle_gserver
paddle_pserver
paddle_api
paddle_cuda
paddle_trainer_lib
paddle_network
paddle_proto
${external_project_dependencies}
${RDMA_LIBS}
)
# Platform-specific link flags: macOS lacks --start-group/--whole-archive,
# so it relies on -all_load plus dynamic lookup instead.
IF(APPLE)
SET(MACOS_LD_FLAGS "-undefined dynamic_lookup -Wl,-all_load -framework CoreFoundation -framework Security")
ELSE(APPLE)
SET(START_GROUP "-Xlinker -start-group")
SET(END_GROUP "-Xlinker -end-group")
SET(ARCHIVE_START "-Wl,--whole-archive")
SET(ARCHIVE_END "-Wl,--no-whole-archive")
ENDIF(APPLE)
# Generate and link the _swig_paddle Python extension module.
SWIG_ADD_MODULE(swig_paddle python Paddle.i)
SWIG_LINK_LIBRARIES(swig_paddle
${MACOS_LD_FLAGS}
${START_GROUP}
${ARCHIVE_START}
paddle_gserver
paddle_function
${METRIC_LIBS}
${ARCHIVE_END}
paddle_pserver
paddle_trainer_lib
paddle_network
paddle_parameter
paddle_optimizer
paddle_math
paddle_utils
paddle_proto
paddle_cuda
paddle_api
${CMAKE_DL_LIBS}
${EXTERNAL_LIBS}
${CMAKE_THREAD_LIBS_INIT}
${RDMA_LD_FLAGS}
# Close the linker group opened by ${START_GROUP}. The previous
# ${START_END} variable was never defined anywhere, so the
# "-Xlinker -end-group" flag was silently dropped and the group
# stayed unclosed; ${END_GROUP} is the variable actually defined above.
${END_GROUP}
)
# Copy the generated SWIG module and Python shim into the py_paddle package
# directory, and touch a timestamp so dependents can track freshness.
add_custom_command(OUTPUT ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/py_paddle
COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/swig_paddle.py ${PADDLE_BINARY_DIR}/python/py_paddle
COMMAND cp ${CMAKE_CURRENT_BINARY_DIR}/_swig_paddle.so ${PADDLE_BINARY_DIR}/python/py_paddle
COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/.timestamp
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle
DEPENDS _swig_paddle
)
# TODO(yuyang18) : make wheel name calculated by cmake
add_custom_target(python_api_wheel ALL DEPENDS ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so)
# Tests need pip available; build it from source when it is not found.
if(WITH_TESTING)
IF(NOT PY_PIP_FOUND)
SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip)
ExternalProject_Add(pip
${EXTERNAL_PROJECT_LOG_ARGS}
GIT_REPOSITORY https://github.com/pypa/pip.git
GIT_TAG 9.0.1
PREFIX ${PIP_SOURCES_DIR}
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
BUILD_IN_SOURCE 1
#DEPENDS python setuptools python_api_wheel
)
ENDIF()
add_subdirectory(test)
endif()
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#include "paddle/legacy/trainer/Trainer.h"
// Backing store for ParameterConfig: either borrows the config embedded in
// a live Parameter, or owns a standalone ParameterConfig proto.
struct ParameterConfigPrivate {
paddle::ParameterPtr parameter;
paddle::ParameterConfig config;
// Prefer the attached parameter's own config when present.
// NOTE(review): const_cast strips the constness of Parameter::getConfig();
// callers should not mutate through this pointer when a parameter is
// attached — confirm against Parameter's contract.
inline paddle::ParameterConfig* getConfigPtr() {
if (parameter != nullptr) {
auto& conf = parameter->getConfig();
return const_cast<paddle::ParameterConfig*>(&conf);
} else {
return &config;
}
}
};
TrainerConfig::TrainerConfig() : m(new TrainerConfigPrivate()) {}
TrainerConfig::~TrainerConfig() { delete m; }
// Loads a trainer config from a Python/proto config file path.
// Caller owns the returned object (SWIG %newobject).
TrainerConfig* TrainerConfig::createFromTrainerConfigFile(
const std::string& confPath) {
LOG(INFO) << "load trainer config from " << confPath;
auto conf = std::make_shared<paddle::TrainerConfigHelper>(confPath);
auto retv = new TrainerConfig();
retv->m->conf = conf;
return retv;
}
// Parses a serialized TrainerConfig protobuf string; CHECK-fails (aborts)
// when the string cannot be parsed.
TrainerConfig* TrainerConfig::createFromProtoString(const std::string& str) {
auto retv = new TrainerConfig();
paddle::TrainerConfig trainerConfigProto;
auto conf = std::make_shared<paddle::TrainerConfigHelper>(trainerConfigProto);
CHECK(conf->getMutableConfig().ParseFromString(str));
retv->m->conf = conf;
return retv;
}
ModelConfig::ModelConfig() : m(new ModelConfigPrivate()) {}
ModelConfig::~ModelConfig() { delete m; }
// Returns a ModelConfig view sharing this trainer config's helper.
ModelConfig* TrainerConfig::getModelConfig() const {
auto retv = new ModelConfig();
retv->m->conf = m->conf;
return retv;
}
ParameterConfig::ParameterConfig() : m(new ParameterConfigPrivate()) {}
ParameterConfig::~ParameterConfig() { delete m; }
// Wraps a paddle::ParameterPtr (shared_ptr, passed as void*); returns
// nullptr when the pointer holds no parameter.
ParameterConfig* ParameterConfig::createParameterConfigFromParameterSharedPtr(
void* ptr) {
auto& p = *(paddle::ParameterPtr*)(ptr);
if (p != nullptr) {
auto conf = new ParameterConfig();
conf->m->parameter = p;
return conf;
} else {
return nullptr;
}
}
// Copies the config out of a raw paddle::Parameter (passed as void*).
ParameterConfig* ParameterConfig::createParameterConfigFromParameterPtr(
void* ptr) {
auto& p = *(paddle::Parameter*)(ptr);
auto conf = new ParameterConfig();
conf->m->config = p.getConfig();
return conf;
}
// Serializes the effective config (attached parameter's or standalone).
std::string ParameterConfig::toProtoString() const {
return m->getConfigPtr()->SerializeAsString();
}
// Raw pointer to the underlying ParameterConfig proto (non-owning).
void* ParameterConfig::getRawPtr() { return m->getConfigPtr(); }
OptimizationConfig::OptimizationConfig() : m(new OptimizationConfigPrivate()) {}
OptimizationConfig::~OptimizationConfig() { delete m; }
// Serializes the effective optimization config (trainer-owned or standalone).
std::string OptimizationConfig::toProtoString() {
return m->getConfig().SerializeAsString();
}
// Returns an OptimizationConfig view that shares this trainer config.
OptimizationConfig* TrainerConfig::getOptimizationConfig() const {
auto opt_config = new OptimizationConfig();
opt_config->m->trainer_config = m->conf;
return opt_config;
}
// Parses a serialized OptimizationConfig proto. NOTE(review): the return
// value of ParseFromString is ignored, so a malformed string yields a
// partially-initialized config — verify callers tolerate this.
OptimizationConfig* OptimizationConfig::createFromProtoString(
const std::string& str) {
auto conf = new OptimizationConfig();
conf->m->config.ParseFromString(str);
return conf;
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <sstream>
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
Evaluator::Evaluator() : m(new EvaluatorPrivate()) {}
Evaluator::~Evaluator() { delete m; }
// start/finish bracket one evaluation pass on the wrapped paddle evaluator.
void Evaluator::start() { m->rawPtr->start(); }
void Evaluator::finish() { m->rawPtr->finish(); }
// Human-readable statistics dump (exposed to Python as __repr__).
std::string Evaluator::toString() {
std::ostringstream sout;
m->rawPtr->printStats(sout);
return sout.str();
}
// Names of all metrics this evaluator produces.
std::vector<std::string> Evaluator::getNames() const {
std::vector<std::string> retv;
m->rawPtr->getNames(&retv);
return retv;
}
// Looks up a metric by name; converts paddle::Error into std::runtime_error.
// NOTE(review): `name` is taken by value to match the declaration in
// PaddleAPI.h; a const reference would avoid a copy but changes the API.
double Evaluator::getValue(const std::string name) const {
paddle::Error err;
double v = m->rawPtr->getValue(name, &err);
if (!err.isOK()) {
throw std::runtime_error(err.msg());
}
return v;
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#include "Internal.h"
#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h"
// Parameter buffers allocated per parameter when the caller does not
// specify an explicit list of types.
std::vector<int> GradientMachine::defaultParamTypes = {
PARAMETER_VALUE, PARAMETER_GRADIENT, PARAMETER_MOMENTUM};
GradientMachine::GradientMachine() : m(new GradientMachinePrivate()) {}
GradientMachine::~GradientMachine() { delete m; }
// Builds a GradientMachine from a paddle::ModelConfig passed as const void*
// (keeps proto types out of the public header). Returns nullptr when the
// underlying factory produces nothing. Caller owns the result.
GradientMachine* GradientMachine::createFromPaddleModelPtr(
const void* confPtr,
GradientMatchineCreateMode mode,
const std::vector<int>& types) {
auto& conf = *(const paddle::ModelConfig*)(confPtr);
std::vector<ParameterType> realTypes;
staticCastVector(&realTypes, types);
auto machineRawPtr = paddle::GradientMachine::create(conf, mode, realTypes);
auto machinePtr = std::shared_ptr<paddle::GradientMachine>(machineRawPtr);
if (machinePtr != nullptr) {
auto machine = new GradientMachine();
machine->m->machine = machinePtr;
return machine;
} else {
return nullptr;
}
}
// Builds a machine from a serialized ModelConfig proto string; returns
// nullptr when parsing leaves required fields uninitialized.
GradientMachine* GradientMachine::createByConfigProtoStr(
const std::string& protoStr,
GradientMatchineCreateMode mode,
const std::vector<int>& types) {
paddle::ModelConfig conf;
conf.ParseFromString(protoStr);
if (conf.IsInitialized()) {
return GradientMachine::createFromPaddleModelPtr(&conf, mode, types);
} else {
return nullptr;
}
}
// Builds a machine from an already-parsed ModelConfig wrapper.
GradientMachine* GradientMachine::createByModelConfig(
ModelConfig* conf,
GradientMatchineCreateMode mode,
const std::vector<int>& types) {
auto confPtr = &conf->m->conf->getModelConfig();
return GradientMachine::createFromPaddleModelPtr(confPtr, mode, types);
}
// Thin lifecycle forwards to the wrapped paddle::GradientMachine.
void GradientMachine::start() { m->machine->start(); }
void GradientMachine::finish() { m->machine->finish(); }
void GradientMachine::onPassEnd() { m->machine->onPassEnd(); }
// Prefetches (e.g. remote sparse) parameters needed by `inArgs`.
void GradientMachine::prefetch(const Arguments& inArgs) {
auto& in =
m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
m->machine->prefetch(in);
}
// Forward pass: unwraps the opaque Arguments pointers back into
// std::vector<paddle::Argument> and delegates to the machine.
void GradientMachine::forward(const Arguments& inArgs,
Arguments* outArgs,
PassType passType) {
auto& in =
m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
auto& out = m->cast<std::vector<paddle::Argument>>(
outArgs->getInternalArgumentsPtr());
paddle::PassType pt = (paddle::PassType)(passType);
m->machine->forward(in, &out, pt);
}
UpdateCallback::~UpdateCallback() {}
// Default callback is a no-op; Python subclasses override apply() via the
// SWIG director mechanism.
void UpdateCallback::apply(Parameter* p) {
// UNUSED(p);
}
// Adapts the public UpdateCallback to the std::function-style callback that
// paddle::GradientMachine::backward expects, wrapping each raw
// paddle::Parameter in a temporary API Parameter object.
class UpdateCallbackWrapper {
public:
explicit UpdateCallbackWrapper(const UpdateCallback& callback)
: callback(const_cast<UpdateCallback&>(callback)) {}
void operator()(paddle::Parameter* param) {
auto p = Parameter::createFromRawPtr(&param);
// @TODO Use Stack variable instead.
callback.apply(p);
delete p;
}
private:
UpdateCallback& callback;
};
// Backward pass, invoking `callback` for each updated parameter.
void GradientMachine::backward(const UpdateCallback& callback) {
m->machine->backward(UpdateCallbackWrapper(callback));
}
// Combined forward + backward pass; see forward()/backward() above.
void GradientMachine::forwardBackward(const Arguments& inArgs,
Arguments* outArgs,
PassType passType,
const UpdateCallback& callback) {
auto& in =
m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
auto& out = m->cast<std::vector<paddle::Argument>>(
outArgs->getInternalArgumentsPtr());
paddle::PassType pt = (paddle::PassType)(passType);
m->machine->forwardBackward(in, &out, pt, UpdateCallbackWrapper(callback));
}
// Restores all parameters from files under `path`.
void GradientMachine::loadParameters(const std::string& path) {
m->machine->loadParameters(path);
}
size_t GradientMachine::getParameterSize() const {
return m->machine->getParameters().size();
}
// Returns a wrapper around the i-th parameter; throws RangeError when i is
// out of bounds. Caller owns the wrapper (SWIG %newobject).
Parameter* GradientMachine::getParameter(size_t i) throw(RangeError) {
auto params = m->machine->getParameters();
if (i < params.size()) {
return Parameter::createFromSharedPtr(&m->machine->getParameters()[i]);
} else {
throw RangeError();
}
}
size_t GradientMachine::getNonStaticParameterSize() const {
return m->machine->getNonStaticParameters().size();
}
// Same as getParameter() but restricted to non-static (trainable) params.
Parameter* GradientMachine::getNonStaticParameter(size_t i) throw(RangeError) {
auto params = m->machine->getNonStaticParameters();
if (i < params.size()) {
return Parameter::createFromSharedPtr(
&m->machine->getNonStaticParameters()[i]);
} else {
throw RangeError();
}
}
void GradientMachine::randParameters() { m->machine->randParameters(); }
// Copies the named layer's output into a new Arguments; throws
// UnsupportError when no machine is attached.
Arguments* GradientMachine::getLayerOutput(const std::string& layerName) const
throw(UnsupportError) {
auto nn = m->machine;
if (nn) {
auto arg = nn->getLayerOutput(layerName);
return Arguments::createByPaddleArgument(&arg);
} else {
throw UnsupportError();
}
}
// Wraps this machine in a beam-search sequence generator configured with
// the given dictionary, begin/end-of-sequence ids, max length and beam
// size. Caller owns the returned generator.
SequenceGenerator* GradientMachine::asSequenceGenerator(
const std::vector<std::string>& dict,
size_t begin_id,
size_t end_id,
size_t max_length,
size_t beam_size) {
SequenceGenerator* r =
SequenceGenerator::createByGradientMachineSharedPtr(&m->machine);
r->setDict(dict);
r->setBos(begin_id);
r->setEos(end_id);
r->setMaxLength(max_length);
r->setBeamSize(beam_size);
return r;
}
// Creates an evaluator suited to this machine's model; caller owns it.
Evaluator* GradientMachine::makeEvaluator() {
auto ev = new Evaluator();
ev->m->rawPtr = m->machine->makeEvaluator();
return ev;
}
// Accumulates this machine's current outputs into the evaluator.
void GradientMachine::eval(Evaluator* evaluator) {
m->machine->eval(evaluator->m->rawPtr);
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "PaddleAPI.h"
#include <algorithm>
#include <vector>
// Rebuilds *dest as an element-wise static_cast of src (T1 -> T2).
// Any previous contents of *dest are discarded.
template <typename T1, typename T2>
void staticCastVector(std::vector<T2>* dest, const std::vector<T1>& src) {
  dest->clear();
  dest->reserve(src.size());
  for (const T1& element : src) {
    dest->push_back(static_cast<T2>(element));
  }
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/legacy/math/Matrix.h"
#include <cstring>
#include <iostream>
#include "PaddleAPI.h"
#include "paddle/legacy/math/CpuSparseMatrix.h"
#include "paddle/legacy/math/SparseMatrix.h"
// Private state: the wrapped, shared paddle matrix.
struct MatrixPrivate {
std::shared_ptr<paddle::Matrix> mat;
};
Matrix::Matrix() : m(new MatrixPrivate()) {}
// Wraps an existing paddle::MatrixPtr (passed as void*); the shared_ptr is
// copied, so the buffer is shared. Returns nullptr for an empty pointer.
Matrix* Matrix::createByPaddleMatrixPtr(void* sharedPtr) {
auto* mat = reinterpret_cast<paddle::MatrixPtr*>(sharedPtr);
if ((*mat) != nullptr) {
auto m = new Matrix();
m->m->mat = *mat;
return m;
} else {
return nullptr;
}
}
// Allocates a height x width matrix filled with zeros, on GPU if requested.
Matrix* Matrix::createZero(size_t height, size_t width, bool useGpu) {
auto m = new Matrix();
m->m->mat = paddle::Matrix::create(height, width, useGpu);
m->m->mat->zero();
return m;
}
// Allocates a height x width matrix and copies `data` in row-major order.
Matrix* Matrix::createDense(const std::vector<float>& data,
size_t height,
size_t width,
bool useGpu) {
auto m = new Matrix();
m->m->mat = paddle::Matrix::create(height, width, useGpu);
m->m->mat->copyFrom(data.data(), data.size());
return m;
}
// Builds a dim1 x dim2 matrix from a numpy buffer. GPU matrices must copy;
// CPU matrices may alias the numpy buffer when copy == false.
Matrix* Matrix::createDenseFromNumpy(float* data,
int dim1,
int dim2,
bool copy,
bool useGpu) throw(UnsupportError) {
if (useGpu) {
/// Gpu mode only supports copy=True
if (!copy) {
throw UnsupportError("Gpu mode only supports copy=True");
}
return Matrix::createGpuDenseFromNumpy(data, dim1, dim2);
} else {
return Matrix::createCpuDenseFromNumpy(data, dim1, dim2, copy);
}
}
// CPU variant: with copy == false the matrix borrows `data`, so the numpy
// array must outlive the Matrix.
Matrix* Matrix::createCpuDenseFromNumpy(float* data,
int dim1,
int dim2,
bool copy) {
auto m = new Matrix();
if (copy) {
m->m->mat = paddle::Matrix::create(dim1, dim2);
m->m->mat->copyFrom(data, dim1 * dim2);
} else {
m->m->mat = paddle::Matrix::create(data, dim1, dim2, false);
}
return m;
}
// GPU variant: always allocates device memory and copies host data in.
Matrix* Matrix::createGpuDenseFromNumpy(float* data, int dim1, int dim2) {
auto m = new Matrix();
m->m->mat = paddle::Matrix::create(dim1, dim2, false, true);
m->m->mat->copyFrom(data, dim1 * dim2);
return m;
}
// Allocates an empty sparse matrix with room for `nnz` non-zeros.
// isNonVal selects a value-less (binary) sparse format.
Matrix* Matrix::createSparse(size_t height,
size_t width,
size_t nnz,
bool isNonVal,
bool isTrans,
bool useGpu) {
auto m = new Matrix();
m->m->mat = paddle::Matrix::createSparseMatrix(
height,
width,
nnz,
isNonVal ? paddle::NO_VALUE : paddle::FLOAT_VALUE,
isTrans,
useGpu);
return m;
}
Matrix::~Matrix() { delete m; }
size_t Matrix::getHeight() const { return m->mat->getHeight(); }
size_t Matrix::getWidth() const { return m->mat->getWidth(); }
// Reads the element at column x, row y (see set() below for the layout).
// Throws RangeError on out-of-range indices.
// BUGFIX: the bounds checks used '>' instead of '>=', so x == width and
// y == height — one past the last valid index — slipped through and read
// or wrote out of bounds.
float Matrix::get(size_t x, size_t y) const throw(RangeError) {
  if (x >= this->getWidth() || y >= this->getHeight()) {
    RangeError e;
    throw e;
  }
  // NOTE(review): x is used as the column offset in set(); assumes
  // getElement(x, y) follows the same convention — confirm against
  // paddle::Matrix.
  return m->mat->getElement(x, y);
}
// Writes `val` at column x, row y. Only CPU dense matrices are supported;
// other matrix kinds raise UnsupportError.
void Matrix::set(size_t x, size_t y, float val) throw(RangeError,
                                                      UnsupportError) {
  if (x >= this->getWidth() || y >= this->getHeight()) {
    RangeError e;
    throw e;
  }
  auto rawMat = m->mat.get();
  if (auto cDenseMat = dynamic_cast<paddle::CpuMatrix*>(rawMat)) {
    // Row-major layout: element (row y, column x).
    *(cDenseMat->getData() + x + y * cDenseMat->getWidth()) = val;
  } else {
    UnsupportError e;
    throw e;
  }
}
// True when the wrapped matrix is a CPU or GPU sparse matrix.
bool Matrix::isSparse() const {
auto raw_mat = m->mat.get();
return dynamic_cast<paddle::CpuSparseMatrix*>(raw_mat) != nullptr ||
dynamic_cast<paddle::GpuSparseMatrix*>(raw_mat) != nullptr;
}
// Value type of a sparse matrix; UnsupportError for dense matrices.
SparseValueType Matrix::getSparseValueType() const throw(UnsupportError) {
auto cpuSparseMat =
std::dynamic_pointer_cast<paddle::CpuSparseMatrix>(m->mat);
if (cpuSparseMat != nullptr) {
return (SparseValueType)cpuSparseMat->getValueType();
} else {
auto gpuSparseMat =
std::dynamic_pointer_cast<paddle::GpuSparseMatrix>(m->mat);
if (gpuSparseMat != nullptr) {
return (SparseValueType)gpuSparseMat->getValueType();
} else {
UnsupportError e;
throw e;
}
}
}
// Storage format of a sparse matrix; GPU sparse matrices are reported as
// CSR unconditionally — presumably the only GPU format; verify.
SparseFormatType Matrix::getSparseFormat() const throw(UnsupportError) {
auto cpuSparseMat =
std::dynamic_pointer_cast<paddle::CpuSparseMatrix>(m->mat);
if (cpuSparseMat != nullptr) {
return (SparseFormatType)cpuSparseMat->getFormat();
} else {
auto gpuSparseMat =
std::dynamic_pointer_cast<paddle::GpuSparseMatrix>(m->mat);
if (gpuSparseMat != nullptr) {
return SPARSE_CSR;
} else {
UnsupportError e;
throw e;
}
}
}
// Column indices of row i; only valid for CPU CSR sparse matrices.
// Throws RangeError for a bad row, UnsupportError for other matrix kinds.
IntArray Matrix::getSparseRowCols(size_t i) const
throw(UnsupportError, RangeError) {
auto cpuSparseMat =
std::dynamic_pointer_cast<paddle::CpuSparseMatrix>(m->mat);
if (cpuSparseMat != nullptr &&
cpuSparseMat->getFormat() == paddle::SPARSE_CSR) {
if (i < cpuSparseMat->getHeight()) {
// cpuSparseMat->print(std::cout);
size_t len = cpuSparseMat->getColNum(i);
return IntArray(cpuSparseMat->getRowCols(i), len);
} else {
RangeError e;
throw e;
}
} else {
UnsupportError e;
throw e;
}
}
// (column, value) pairs of row i; only valid for CPU sparse matrices that
// actually store float values (FLOAT_VALUE).
IntWithFloatArray Matrix::getSparseRowColsVal(size_t i) const
throw(UnsupportError, RangeError) {
auto cpuSparseMat =
std::dynamic_pointer_cast<paddle::CpuSparseMatrix>(m->mat);
if (cpuSparseMat != nullptr &&
cpuSparseMat->getValueType() == paddle::FLOAT_VALUE) {
if (i < cpuSparseMat->getHeight()) {
return IntWithFloatArray(cpuSparseMat->getRowValues(i),
cpuSparseMat->getRowCols(i),
cpuSparseMat->getColNum(i));
} else {
RangeError e;
throw e;
}
} else {
UnsupportError e;
throw e;
}
}
// Flattened element buffer. For GPU matrices the data is copied to a newly
// allocated host buffer and needFree is set so the SWIG typemap releases
// it; CPU matrices return a non-owning view of the live buffer.
FloatArray Matrix::getData() const {
auto rawMat = m->mat.get();
if (dynamic_cast<paddle::GpuMemoryHandle*>(rawMat->getMemoryHandle().get())) {
// is gpu. then copy data
float* data = rawMat->getData();
size_t len = rawMat->getElementCnt();
float* cpuData = new float[len];
hl_memcpy_device2host(cpuData, data, len * sizeof(float));
FloatArray ret_val(cpuData, len);
ret_val.needFree = true;
return ret_val;
} else {
FloatArray ret_val(rawMat->getData(), rawMat->getElementCnt());
return ret_val;
}
}
// Fills a CPU sparse matrix from parallel (rows, cols, vals) triplets;
// UnsupportError for anything other than a CPU sparse matrix.
// NOTE(review): the const_casts work around a non-const copyFrom signature;
// the vectors do not appear to be modified — verify against
// CpuSparseMatrix::copyFrom.
void Matrix::sparseCopyFrom(
const std::vector<int>& rows,
const std::vector<int>& cols,
const std::vector<float>& vals) throw(UnsupportError) {
auto cpuSparseMat =
std::dynamic_pointer_cast<paddle::CpuSparseMatrix>(m->mat);
if (cpuSparseMat != nullptr) {
// LOG(INFO) <<"RowSize = "<<rows.size()
// <<" ColSize = "<<cols.size()
// <<" ValSize = "<<vals.size();
cpuSparseMat->copyFrom(const_cast<std::vector<int>&>(rows),
const_cast<std::vector<int>&>(cols),
const_cast<std::vector<float>&>(vals));
} else {
UnsupportError e;
throw e;
}
}
// Address of the internal shared_ptr, used by sibling wrappers to share
// the underlying buffer (see Arguments::setSlotValue).
void* Matrix::getSharedPtr() const { return &m->mat; }
// Exposes the CPU matrix buffer to numpy without copying (ARGOUTVIEW
// typemap); UnsupportError for non-CPU matrices. The numpy view must not
// outlive this Matrix.
void Matrix::toNumpyMatInplace(float** view_data,
int* dim1,
int* dim2) throw(UnsupportError) {
auto cpuMat = std::dynamic_pointer_cast<paddle::CpuMatrix>(m->mat);
if (cpuMat) {
*dim1 = cpuMat->getHeight();
*dim2 = cpuMat->getWidth();
*view_data = cpuMat->getData();
} else {
throw UnsupportError();
}
}
// Copies the dense matrix into a freshly allocated host buffer that numpy
// takes ownership of (ARGOUTVIEWM typemap frees it). Sparse matrices are
// rejected; GPU matrices are copied device-to-host.
void Matrix::copyToNumpyMat(float** view_m_data,
int* dim1,
int* dim2) throw(UnsupportError) {
static_assert(sizeof(paddle::real) == sizeof(float),
"Currently PaddleAPI only support for single "
"precision version of paddle.");
if (this->isSparse()) {
throw UnsupportError();
} else {
*dim1 = m->mat->getHeight();
*dim2 = m->mat->getWidth();
*view_m_data = new float[(*dim1) * (*dim2)];
if (auto cpuMat = dynamic_cast<paddle::CpuMatrix*>(m->mat.get())) {
auto src = cpuMat->getData();
auto dest = *view_m_data;
std::memcpy(dest, src, sizeof(paddle::real) * (*dim1) * (*dim2));
} else if (auto gpuMat = dynamic_cast<paddle::GpuMatrix*>(m->mat.get())) {
auto src = gpuMat->getData();
auto dest = *view_m_data;
hl_memcpy_device2host(
dest, src, sizeof(paddle::real) * (*dim1) * (*dim2));
} else {
LOG(WARNING) << "Unexpected Situation";
throw UnsupportError();
}
}
}
// Copies a numpy buffer into this dense matrix; dimensions must match
// exactly (RangeError otherwise). Self-copy is skipped when the numpy
// buffer aliases the matrix's own data.
void Matrix::copyFromNumpyMat(float* data,
int dim1,
int dim2) throw(UnsupportError, RangeError) {
if (isSparse()) {
throw UnsupportError();
} else {
if (this->getHeight() == (size_t)dim1 && this->getWidth() == (size_t)dim2) {
if (m->mat->getData() != data) {
m->mat->copyFrom(data, dim1 * dim2);
}
} else {
throw RangeError();
}
}
}
// True when the wrapped matrix lives on the GPU (dense or sparse).
bool Matrix::isGpu() const {
auto rawPtr = m->mat.get();
return dynamic_cast<paddle::GpuMatrix*>(rawPtr) != nullptr ||
dynamic_cast<paddle::GpuSparseMatrix*>(rawPtr) != nullptr;
}
// SWIG interface for the legacy Paddle C++ API. directors="1" lets Python
// subclasses override C++ virtuals (used by UpdateCallback).
%module(directors="1") swig_paddle
%include "std_string.i"
%{
#define SWIG_FILE_WITH_INIT
#include "legacy/api/PaddleAPI.h"
%}
%include "exception.i"
// Translate a thrown C++ UnsupportError into a Python RuntimeError.
%typemap(throws) UnsupportError %{
SWIG_exception(SWIG_RuntimeError, $1.what());
SWIG_fail;
%}
%include "std_vector.i"
%include "std_pair.i"
#ifdef SWIGPYTHON
%include "numpy.i"
#endif
// numpy's C API must be initialized once per module import.
%init %{
#ifdef SWIGPYTHON
import_array();
#endif
%}
// Concrete std container instantiations exposed to Python.
namespace std {
%template(vector_int) vector<int>;
%template(vector_uint) vector<unsigned int>;
%template(vector_float) vector<float>;
%template(vector_string) vector<string>;
%template(vector_vec_star) vector<Vector*>;
}
#ifdef SWIGPYTHON
// Convert a Python list of strings into the (argc, argv) pair expected by
// command-line style entry points. The char* pointers borrow the Python
// string buffers; only the argv array itself is malloc'd.
%typemap(in) (int argc, char** argv) {
int i = 0;
if (!PyList_Check($input)) {
PyErr_SetString(PyExc_ValueError, "Expecting a list");
return NULL;
}
$1 = PyList_Size($input);
$2 = (char **) malloc(($1+1)*sizeof(char *));
for (i = 0; i < $1; i++) {
PyObject *s = PyList_GetItem($input,i);
if (!PyString_Check(s)) {
free($2);
PyErr_SetString(PyExc_ValueError, "List items must be strings");
return NULL;
}
$2[i] = PyString_AsString(s);
}
$2[i] = 0;
}
// Release the argv array allocated by the typemap above.
%typemap(freearg) (int argc, char** argv) {
if ($2) free($2);
}
// Marshal the API's array structs into Python lists, freeing the C buffer
// when the producer flagged it with needFree.
%typemap(out) FloatArray {
$result = PyList_New($1.length);
for (size_t i=0; i<$1.length; ++i) {
PyList_SetItem($result, i, PyFloat_FromDouble($1.buf[i]));
}
if($1.needFree) {
delete [] $1.buf;
}
}
%typemap(out) IntArray {
$result = PyList_New($1.length);
for (size_t i=0; i<$1.length; ++i) {
PyList_SetItem($result, i, PyInt_FromLong($1.buf[i]));
}
if ($1.needFree) {
delete [] $1.buf;
}
}
// (index, value) pairs, e.g. from Matrix::getSparseRowColsVal.
%typemap(out) IntWithFloatArray {
$result = PyList_New($1.length);
for (size_t i=0; i<$1.length; ++i) {
PyList_SetItem($result, i, PyTuple_Pack(2,
PyInt_FromLong($1.idxBuf[i]),
PyFloat_FromDouble($1.valBuf[i])
));
}
if ($1.needFree) {
delete [] $1.idxBuf;
delete [] $1.valBuf;
}
}
// Map C++ accessors onto Python dunder protocols so wrapped objects behave
// like native sequences.
%rename(__getitem__) IVector::get;
%rename(__setitem__) IVector::set;
%rename(__len__) IVector::getSize;
%rename(__getitem__) Vector::get;
%rename(__setitem__) Vector::set;
%rename(__len__) Vector::getSize;
%rename(__len__) Parameter::getSize;
%rename(__call__) ParameterTraverseCallback::apply;
%rename(__repr__) Evaluator::toString;
// numpy.i typemap bindings: match C++ parameter-name patterns to numpy
// array conventions (INPLACE = borrow caller's array, ARGOUTVIEW = return
// a view, ARGOUTVIEWM = return an array that owns/frees its buffer).
%apply (float* INPLACE_ARRAY2, int DIM1, int DIM2) {
(float* data, int dim1, int dim2)
}
%apply (float** ARGOUTVIEW_ARRAY2, int* DIM1, int* DIM2) {
(float** view_data, int* dim1, int* dim2)
}
%apply (float** ARGOUTVIEWM_ARRAY2, int* DIM1, int* DIM2) {
(float** view_m_data, int* dim1, int* dim2)
}
%apply (int** ARGOUTVIEWM_ARRAY1, int* DIM1) {
(int** view_m_data, int* dim1)
}
%apply (int* INPLACE_ARRAY1, int DIM1) {
(int* data, int dim)
}
%apply (int** ARGOUTVIEW_ARRAY1, int* DIM1) {
(int** view_data, int* dim1)
}
%apply (float* INPLACE_ARRAY1, int DIM1) {
(float* data, int dim)
}
%apply (float** ARGOUTVIEW_ARRAY1, int* DIM1) {
(float** view_data, int* dim1)
}
%apply (float** ARGOUTVIEWM_ARRAY1, int* DIM1) {
(float** view_m_data, int* dim1)
}
#endif
// The below functions internally create object by "new", so it should use
// use SWIG to handle gc. There are hints for SWIG to handle GC.
%newobject Matrix::createZero;
%newobject Matrix::createSparse;
%newobject Matrix::createDense;
%newobject Matrix::createDenseFromNumpy;
%newobject Matrix::createCpuDenseFromNumpy;
%newobject Matrix::createGpuDenseFromNumpy;
%newobject Vector::createZero;
%newobject Vector::create;
%newobject Vector::createVectorFromNumpy;
%newobject Vector::createCpuVectorFromNumpy;
%newobject Vector::createGpuVectorFromNumpy;
%newobject IVector::createZero;
%newobject IVector::create;
%newobject IVector::createVectorFromNumpy;
%newobject IVector::createCpuVectorFromNumpy;
%newobject IVector::createGpuVectorFromNumpy;
%newobject Trainer::createByCommandLine;
%newobject Trainer::getForwardOutput;
%newobject Trainer::getLayerOutput;
%newobject Arguments::getSlotValue;
%newobject Arguments::getSlotIds;
%newobject Arguments::getSlotIn;
%newobject Arguments::getSlotSequenceStartPositions;
%newobject Arguments::getSlotSequenceDim;
%newobject Arguments::createArguments;
%newobject GradientMachine::createByConfigProtoStr;
%newobject GradientMachine::createByModelConfig;
%newobject GradientMachine::asSequenceGenerator;
%newobject GradientMachine::getParameter;
%newobject GradientMachine::getLayerOutput;
%newobject GradientMachine::makeEvaluator;
%newobject TrainerConfig::createFromTrainerConfigFile;
%newobject TrainerConfig::getModelConfig;
%newobject TrainerConfig::getOptimizationConfig;
%newobject Parameter::getBuf;
%newobject Parameter::getConfig;
%newobject ParameterOptimizer::create;
%newobject ParameterOptimizer::needSpecialTraversal;
%newobject ParameterUpdater::createLocalUpdater;
%newobject ParameterUpdater::createRemoteUpdater;
%newobject ParameterUpdater::createNewRemoteUpdater;
// Allow Python subclasses of UpdateCallback to receive backward() calls.
%feature("director") UpdateCallback;
%feature("autodoc", 1); // To generate method stub, for code hint in ide
// Ignore many private class, and method cannot be handled by swig.
%ignore MatrixPrivate;
%ignore TrainerPrivate;
%ignore IVector::operator[];
%ignore ArgumentsPrivate;
%ignore GradientMachinePrivate;
%ignore TrainerConfigPrivate;
%ignore ModelConfigPrivate;
%ignore ParameterPrivate;
%ignore SequenceGeneratorPrivate;
%ignore VectorPrivate;
%ignore ParameterConfigPrivate;
%ignore OptimizationConfigPrivate;
%ignore ParameterTraverseCallbackPrivate;
%include "legacy/utils/GlobalConstants.h"
%include "legacy/api/PaddleAPI.h"
(This diff has been collapsed.)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include "PaddleAPI.h"
#include "paddle/legacy/gserver/evaluators/Evaluator.h"
#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
#include "paddle/legacy/parameter/ParameterUpdaterBase.h"
#include "paddle/legacy/trainer/TrainerConfigHelper.h"
// Pimpl data for the public GradientMachine wrapper: shares ownership of the
// underlying paddle::GradientMachine.
struct GradientMachinePrivate {
  std::shared_ptr<paddle::GradientMachine> machine;

  // Reinterpret an opaque void* (used to cross the C API boundary) as a
  // reference to T. The caller must guarantee ptr really points at a T.
  // Named cast instead of the original C-style cast.
  template <typename T>
  inline T& cast(void* ptr) {
    return *static_cast<T*>(ptr);
  }
};
// Pimpl data for OptimizationConfig: the optimization settings either live
// inside a full trainer configuration (trainer_config) or stand alone
// (config).
struct OptimizationConfigPrivate {
  std::shared_ptr<paddle::TrainerConfigHelper> trainer_config;
  paddle::OptimizationConfig config;

  // Prefer the optimization section of the attached trainer config; fall
  // back to the standalone config when none is attached.
  const paddle::OptimizationConfig& getConfig() {
    return trainer_config != nullptr ? trainer_config->getOptConfig() : config;
  }
};
// Pimpl data for TrainerConfig: shared ownership of the parsed trainer
// configuration helper.
struct TrainerConfigPrivate {
  std::shared_ptr<paddle::TrainerConfigHelper> conf;
  // Explicitly defaulted instead of the original empty user-provided body.
  TrainerConfigPrivate() = default;
};
// Pimpl data for ModelConfig: the model section is reached through the same
// shared TrainerConfigHelper that owns the whole trainer configuration.
struct ModelConfigPrivate {
  std::shared_ptr<paddle::TrainerConfigHelper> conf;
};
// Pimpl data for Arguments: the list of argument "slots" exchanged with the
// legacy gradient machine API.
struct ArgumentsPrivate {
  std::vector<paddle::Argument> outputs;

  // Bounds-checked access to slot `idx`. The dynamic exception specification
  // is kept because the public header declares it. Throws RangeError when
  // idx is out of range.
  inline paddle::Argument& getArg(size_t idx) throw(RangeError) {
    if (idx >= outputs.size()) {
      throw RangeError();
    }
    return outputs[idx];
  }

  // Reinterpret an opaque pointer as a shared_ptr<T>&; rawPtr must really
  // point at a std::shared_ptr<T> (void* API boundary). Named cast instead
  // of the original C-style cast.
  template <typename T>
  std::shared_ptr<T>& cast(void* rawPtr) const {
    return *static_cast<std::shared_ptr<T>*>(rawPtr);
  }
};
// Pimpl data for ParameterUpdater: exclusive ownership of the underlying
// legacy updater implementation.
struct ParameterUpdaterPrivate {
  std::unique_ptr<paddle::ParameterUpdater> updater;
};
// Pimpl data for Parameter: the wrapper either shares ownership of a
// paddle::Parameter (sharedPtr) or merely observes one owned elsewhere
// (rawPtr).
struct ParameterPrivate {
  std::shared_ptr<paddle::Parameter> sharedPtr;
  paddle::Parameter* rawPtr;  // non-owning; set only on the ParameterUpdater
                              // path — everywhere else sharedPtr holds the
                              // value.
  ParameterPrivate() : sharedPtr(nullptr), rawPtr(nullptr) {}
  // Return whichever pointer is populated, preferring the owning one.
  paddle::Parameter* getPtr() { return sharedPtr ? sharedPtr.get() : rawPtr; }
};
// Pimpl data for Evaluator: owns a raw evaluator and deletes it on
// destruction.
// NOTE(review): the struct is implicitly copyable, and copying would
// double-delete rawPtr — presumably instances only ever live behind a single
// Evaluator; confirm before reusing this type elsewhere.
struct EvaluatorPrivate {
  paddle::Evaluator* rawPtr;
  EvaluatorPrivate() : rawPtr(nullptr) {}
  ~EvaluatorPrivate() { delete rawPtr; }
};
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/legacy/parameter/Parameter.h"
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
// Construct an empty Parameter wrapper (no underlying paddle::Parameter yet).
Parameter::Parameter() : m(new ParameterPrivate()) {}
Parameter::~Parameter() { delete m; }
// Wrap a non-owning paddle::Parameter*; ptr must point at a
// paddle::Parameter* (i.e. ptr is really a paddle::Parameter**).
// Caller owns the returned wrapper object.
Parameter* Parameter::createFromRawPtr(void* ptr) {
  auto p = new Parameter();
  p->m->rawPtr = *static_cast<paddle::Parameter**>(ptr);
  return p;
}
// Wrap a shared paddle::Parameter; ptr must point at a paddle::ParameterPtr.
// Returns nullptr when that shared pointer is empty; otherwise the returned
// caller-owned wrapper shares ownership with the caller's pointer.
// Named cast and guard clause instead of the original C-style cast/else.
Parameter* Parameter::createFromSharedPtr(void* ptr) {
  auto& p = *static_cast<paddle::ParameterPtr*>(ptr);
  if (p == nullptr) {
    return nullptr;
  }
  auto retParam = new Parameter();
  retParam->m->sharedPtr = p;
  return retParam;
}
std::string Parameter::getName() const { return m->getPtr()->getName(); }
// Return a new caller-owned Vector wrapper viewing the parameter buffer of
// the given type (value / gradient / momentum ...).
Vector* Parameter::getBuf(ParameterType type) {
  auto buf = m->getPtr()->getBuf(type);
  return Vector::createByPaddleVectorPtr(&buf);
}
// Return a new caller-owned ParameterConfig wrapper; the construction path
// depends on whether this wrapper owns (sharedPtr) or observes (rawPtr) the
// parameter.
ParameterConfig* Parameter::getConfig() {
  if (m->sharedPtr) {
    return ParameterConfig::createParameterConfigFromParameterSharedPtr(
        &m->sharedPtr);
  } else {
    return ParameterConfig::createParameterConfigFromParameterPtr(m->rawPtr);
  }
}
size_t Parameter::getID() const { return m->getPtr()->getID(); }
void Parameter::setValueUpdated() { m->getPtr()->setValueUpdated(); }
// Persist / restore the parameter value to / from a file; returns success.
bool Parameter::save(const std::string& filename) const {
  return m->getPtr()->save(filename);
}
bool Parameter::load(const std::string& filename) const {
  return m->getPtr()->load(filename);
}
size_t Parameter::getSize() const { return m->getPtr()->getSize(); }
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/legacy/parameter/ParameterOptimizer.h"
#include <algorithm>
#include "Internal.h"
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
// Pimpl data for ParameterOptimizer: exclusive ownership of the legacy
// optimizer implementation.
struct ParameterOptimizerPrivate {
  std::unique_ptr<paddle::ParameterOptimizer> optimizer;
};
// Pimpl data for ParameterTraverseCallback: adapts the wrapper-typed apply()
// signature to the legacy callback working on raw paddle types.
struct ParameterTraverseCallbackPrivate {
  paddle::ParameterOptimizer::TraverseCallback callback;
  ParameterTraverseCallbackPrivate() {}
  ParameterTraverseCallbackPrivate(
      const paddle::ParameterOptimizer::TraverseCallback& callback)
      : callback(callback) {}
  // Unwrap each Vector* into its underlying paddle::VectorPtr (null wrappers
  // become empty VectorPtrs), unwrap the config, then invoke the callback.
  void apply(const std::vector<Vector*>& vecs,
             const ParameterConfig& conf,
             size_t sparseId) {
    std::vector<paddle::VectorPtr> real_vecs;
    real_vecs.resize(vecs.size());
    std::transform(vecs.begin(), vecs.end(), real_vecs.begin(), [](Vector* v) {
      if (v) {
        return *(paddle::VectorPtr*)(v->getSharedPtr());
      } else {
        return paddle::VectorPtr();
      }
    });
    // getRawPtr() is non-const in the public API, hence the const_cast; the
    // callback may legitimately mutate the underlying config.
    paddle::ParameterConfig& real_conf =
        *(paddle::ParameterConfig*)(const_cast<ParameterConfig&>(conf)
                                        .getRawPtr());
    callback(real_vecs.data(), real_conf, sparseId);
  }
};
ParameterOptimizer::ParameterOptimizer() : m(new ParameterOptimizerPrivate()) {}
ParameterOptimizer::~ParameterOptimizer() { delete m; }
// Factory: build an optimizer from an OptimizationConfig. The caller owns
// the returned object (exposed through SWIG %newobject).
ParameterOptimizer* ParameterOptimizer::create(OptimizationConfig* config) {
  CHECK(config != nullptr);
  auto retOptimizer = new ParameterOptimizer();
  retOptimizer->m->optimizer.reset(
      paddle::ParameterOptimizer::create(config->m->getConfig(), false));
  return retOptimizer;
}
// Initialize for a parameter with `numRows` rows described by `config`.
void ParameterOptimizer::init(size_t numRows, const ParameterConfig* config) {
  auto& conf = *(paddle::ParameterConfig*)(const_cast<ParameterConfig*>(config)
                                               ->getRawPtr());
  m->optimizer->init(numRows, &conf);
}
void ParameterOptimizer::startPass() { m->optimizer->startPass(); }
void ParameterOptimizer::finishPass() { m->optimizer->finishPass(); }
// numSamplesProcessed is forwarded as int64_t; assert the top bit is clear
// so the size_t -> int64_t conversion cannot change the value.
void ParameterOptimizer::startBatch(size_t numSamplesProcessed) {
  constexpr size_t high_1 = 1UL << (sizeof(size_t) * 8 - 1);
  CHECK_EQ(numSamplesProcessed & high_1, 0UL);  // Safely cast.
  m->optimizer->startBatch((int64_t)numSamplesProcessed);
}
void ParameterOptimizer::finishBatch() { m->optimizer->finishBatch(); }
// Apply one optimization step to the given buffers. Wraps the optimizer's
// update in a temporary traverse-callback adapter so the unwrapping of
// Vector*/ParameterConfig is shared with ParameterTraverseCallback::apply.
void ParameterOptimizer::update(const std::vector<Vector*>& vecs,
                                const ParameterConfig& conf,
                                size_t sparseId) {
  ParameterTraverseCallbackPrivate invoker(
      [&](const paddle::VectorPtr _vecs[],
          const paddle::ParameterConfig& config,
          size_t sid = -1UL) { m->optimizer->update(_vecs, config, sid); });
  invoker.apply(vecs, conf, sparseId);
}
// Parameter types (value/gradient/...) the optimizer needs, widened to
// plain ints for the SWIG layer.
std::vector<int> ParameterOptimizer::getParameterTypes() const {
  std::vector<int> returnValue;
  staticCastVector(&returnValue, m->optimizer->getParameterTypes());
  return returnValue;
}
ParameterTraverseCallback::ParameterTraverseCallback()
    : m(new ParameterTraverseCallbackPrivate()) {}
ParameterTraverseCallback::~ParameterTraverseCallback() { delete m; }
// Forward to the unwrapping adapter in the pimpl.
void ParameterTraverseCallback::apply(const std::vector<Vector*>& vecs,
                                      const ParameterConfig& conf,
                                      size_t sparseId) {
  m->apply(vecs, conf, sparseId);
}
// Ask the optimizer whether this parameter needs a special traversal pass
// (e.g. for sparse updates). Returns a caller-owned callback wrapper, or
// nullptr when no special traversal is required.
ParameterTraverseCallback* ParameterOptimizer::needSpecialTraversal(
    const ParameterConfig& config) const {
  auto& param_config =
      *(paddle::ParameterConfig*)const_cast<ParameterConfig&>(config)
           .getRawPtr();
  auto callback = m->optimizer->needSpecialTraversal(param_config);
  if (callback) {
    auto retCallback = new ParameterTraverseCallback();
    retCallback->m->callback = callback;
    return retCallback;
  } else {
    return nullptr;
  }
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#ifndef PADDLE_WITHOUT_GOLANG
#include "paddle/legacy/trainer/NewRemoteParameterUpdater.h"
#endif
#include "paddle/legacy/trainer/RemoteParameterUpdater.h"
#include "paddle/legacy/trainer/ThreadParameterUpdater.h"
ParameterUpdater::ParameterUpdater() : m(new ParameterUpdaterPrivate()) {}
// Factory: single-machine multi-thread SGD updater. Caller owns the result.
ParameterUpdater *ParameterUpdater::createLocalUpdater(
    OptimizationConfig *config) {
  auto updater = new ParameterUpdater();
  updater->m->updater.reset(
      new paddle::SgdThreadUpdater(config->m->getConfig()));
  return updater;
}
// Factory: updater that talks to the Go parameter-server fleet described by
// pserverSpec (optionally discovered through etcd). Throws UnsupportError
// when the binary was built without Go support.
ParameterUpdater *ParameterUpdater::createNewRemoteUpdater(
    OptimizationConfig *config,
    const std::string pserverSpec,
    const bool useEtcd) throw(UnsupportError) {
#ifndef PADDLE_WITHOUT_GOLANG
  auto updater = new ParameterUpdater();
  updater->m->updater.reset(new paddle::NewRemoteParameterUpdater(
      config->m->getConfig(), pserverSpec, useEtcd));
  return updater;
#else
  throw UnsupportError("not compiled with WITH_GOLANG");
#endif
}
// Factory: classic remote (dense) updater; when useSparseUpdater is set, the
// dense updater is wrapped in a composite that also handles sparse remote
// parameters. Caller owns the result.
ParameterUpdater *ParameterUpdater::createRemoteUpdater(
    OptimizationConfig *config, int passCount, bool useSparseUpdater) {
  auto updater = new ParameterUpdater();
  auto remoteUpdater = new paddle::RemoteParameterUpdater(
      config->m->getConfig(), passCount, nullptr);
  if (useSparseUpdater) {
    // Ownership of remoteUpdater moves into the composite.
    std::unique_ptr<paddle::ParameterUpdater> remoteUpdaterPtr(remoteUpdater);
    auto sparseRemoteUpdater =
        new paddle::SparseRemoteParameterUpdaterComposite(
            config->m->getConfig(),
            passCount,
            false,
            std::move(remoteUpdaterPtr));
    updater->m->updater.reset(sparseRemoteUpdater);
  } else {
    updater->m->updater.reset(remoteUpdater);
  }
  return updater;
}
ParameterUpdater::~ParameterUpdater() { delete m; }
// Bind the updater to the trainable (non-static) parameters of gm.
void ParameterUpdater::init(const GradientMachine &gm) {
  m->updater->init(gm.m->machine->getNonStaticParameters());
}
void ParameterUpdater::startPass() { m->updater->startPass(); }
void ParameterUpdater::finishPass() { m->updater->finishPass(); }
PassType ParameterUpdater::startBatch(size_t batchSize) {
  return m->updater->startBatch((int64_t)batchSize);
}
void ParameterUpdater::finishBatch(float cost) {
  m->updater->finishBatch(cost);
}
void ParameterUpdater::update(Parameter *param) {
  auto paddleParam = param->m->getPtr();
  m->updater->update(paddleParam);
}
// Remote updaters only: fetch parameters from the parameter server.
void ParameterUpdater::getParametersRemote(bool fullSize, bool apply) {
  m->updater->getParametersRemote(fullSize, apply);
}
void ParameterUpdater::restore() { m->updater->restore(); }
void ParameterUpdater::apply() { m->updater->apply(); }
void ParameterUpdater::catchUpWith() { m->updater->catchUpWith(); }
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <iterator>
#include <sstream>
#include <vector>
#include "PaddleAPI.h"
#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
#include "paddle/legacy/parameter/Argument.h"
#include "paddle/legacy/utils/Flags.h"
// used to represent partial sequence
struct Path {
std::vector<int> ids;
float logProb;
paddle::MachineState machineState;
Path() { logProb = 0; }
Path(std::vector<int>& ids, float logProb, paddle::MachineState& machineState)
: ids(ids), logProb(logProb), machineState(machineState) {}
bool operator<(const Path& other) const { return (logProb > other.logProb); }
};
// Return top k (k == beam_size) optimal paths using beam search. The last
// element of inArgs is the Argument of feedback. gradMachine has MaxIdLayer
// as output and outArgs thus stores top k labels and their probabilities per
// position.
static void findNBest(paddle::GradientMachine* gradMachine,
                      std::vector<paddle::Argument>& inArgs,
                      std::vector<Path>& finalPaths,
                      size_t bos_id,
                      size_t eos_id,
                      size_t max_length) {
  std::vector<Path> paths;
  Path emptyPath;
  paths.push_back(emptyPath);
  finalPaths.clear();
  gradMachine->resetState();
  // Seed the feedback slot with the begin-of-sequence token.
  paddle::Argument feedback = inArgs.back();
  feedback.ids->setElement(0, (int)(bos_id));
  float minFinalPathLogProb = 0;
  size_t beam = 0;
  int id;
  std::vector<paddle::Argument> outArgs;
  while (true) {  // iterate over each generated word
    std::vector<Path> newPaths;
    paddle::MachineState machineState;
    for (size_t j = 0; j < paths.size(); j++) {
      Path& path = paths[j];
      // Restore this partial path's machine state before extending it (the
      // very first step starts from the freshly reset state instead).
      if (path.machineState.size() > 0) {
        gradMachine->setState(path.machineState);
        feedback.ids->setElement(0, path.ids.back());
      }
      gradMachine->forward(inArgs, &outArgs, paddle::PASS_TEST);
      gradMachine->getState(machineState);
      // outArgs[0] holds the beam-size best token ids and probabilities.
      beam = outArgs[0].ids->getSize();
      for (size_t k = 0; k < beam; k++) {
        id = outArgs[0].ids->getElement(k);
        float prob = outArgs[0].in->getElement(0, k);
        std::vector<int> nids(path.ids);
        nids.push_back(id);
        float newLogProb = path.logProb + log(prob);
        Path newPath(nids, newLogProb, machineState);
        // A path finishes when it emits EOS or reaches the length limit.
        if (id == (int)eos_id || nids.size() >= max_length) {
          finalPaths.push_back(newPath);
          if (minFinalPathLogProb > newPath.logProb) {
            minFinalPathLogProb = newPath.logProb;
          }
        } else {
          newPaths.push_back(newPath);
        }
      }
    }
    if (newPaths.size() == 0) {
      break;
    }
    // Keep only the `beam` most probable open paths (Path::operator< orders
    // by descending logProb).
    std::nth_element(newPaths.begin(),
                     newPaths.begin() + std::min(beam, newPaths.size()),
                     newPaths.end());
    if (newPaths.size() > beam) {
      newPaths.resize(beam);
    }
    // pathA < pathB means pathA.logProb > pathB.logProb
    float maxPathLogProb =
        std::min_element(newPaths.begin(), newPaths.end())->logProb;
    // Stop once the beam is full of finished paths and no open path can beat
    // the worst finished one.
    if (finalPaths.size() >= beam && minFinalPathLogProb >= maxPathLogProb) {
      break;
    }
    paths = newPaths;
  }  // end while
  // Order the finished paths best-first and truncate to the beam width.
  std::partial_sort(finalPaths.begin(),
                    finalPaths.begin() + std::min(beam, finalPaths.size()),
                    finalPaths.end());
  if (finalPaths.size() > beam) {
    finalPaths.resize(beam);
  }
}
// Pimpl data for SequenceGenerator: the gradient machine used for decoding,
// the id->word dictionary, generation bounds, and a reusable one-token
// feedback Argument carrying the previously generated token back in.
struct SequenceGeneratorPrivate {
  std::shared_ptr<paddle::GradientMachine> machine;
  std::shared_ptr<std::vector<std::string>> dict;
  size_t beginPos;  // begin-of-sequence token id
  size_t endPos;    // end-of-sequence token id
  size_t maxLength;
  paddle::Argument feedback;
  // Reinterpret an opaque void* as a reference to T (void* API boundary).
  template <typename T>
  inline T& cast(void* ptr) {
    return *(T*)(ptr);
  }
  // Run beam search with this generator's machine and bounds.
  inline void findNBest(std::vector<paddle::Argument>& inArgs,
                        std::vector<Path>& path) {
    ::findNBest(machine.get(), inArgs, path, beginPos, endPos, maxLength);
  }
  SequenceGeneratorPrivate()
      : dict(std::make_shared<std::vector<std::string>>()),
        beginPos(0UL),
        endPos(0UL),
        maxLength(0UL),
        feedback(__create_feedback__()) {}

 private:
  // Build the one-token feedback Argument: a single id plus sequence start
  // positions [0, 1] marking it as a one-element sequence.
  static paddle::Argument __create_feedback__() {
    paddle::Argument feedback;
    feedback.ids = paddle::IVector::create(/* size= */ 1, FLAGS_use_gpu);
    feedback.sequenceStartPositions =
        paddle::ICpuGpuVector::create(/* size= */ 2, /* useGpu= */ false);
    feedback.sequenceStartPositions->getMutableData(false)[0] = 0;
    feedback.sequenceStartPositions->getMutableData(false)[1] = 1;
    return feedback;
  }
};
SequenceGenerator::SequenceGenerator() : m(new SequenceGeneratorPrivate()) {}
SequenceGenerator::~SequenceGenerator() { delete m; }
// Concrete ISequenceResults over the Paths produced by beam search; shares
// ownership of both the paths and the dictionary used to render words.
class PathSequenceResults : public ISequenceResults {
  // ISequenceResults interface
 public:
  PathSequenceResults(const std::shared_ptr<std::vector<Path>>& path,
                      const std::shared_ptr<std::vector<std::string>>& dict)
      : path_(path), dict_(dict) {}
  size_t getSize() const { return path_->size(); }
  // Render path `id` as text by mapping each token id through the
  // dictionary; words are space-separated when `split` is true. Throws
  // RangeError for an out-of-range id.
  std::string getSentence(size_t id, bool split) const throw(RangeError) {
    if (id < getSize()) {
      Path& p = (*path_)[id];
      std::ostringstream sout;
      std::transform(p.ids.begin(),
                     p.ids.end(),
                     std::ostream_iterator<std::string>(sout, split ? " " : ""),
                     [&](int id) { return (*dict_)[id]; });
      return sout.str();
    } else {
      RangeError e;
      throw e;
    }
  }
  // Raw token ids of path `id`. Throws RangeError for an out-of-range id.
  std::vector<int> getSequence(size_t id) const throw(RangeError) {
    if (id < getSize()) {
      Path& p = (*path_)[id];
      return p.ids;
    } else {
      RangeError e;
      throw e;
    }
  }
  // Accumulated log-probability of path `id`.
  float getScore(size_t id) const throw(RangeError) {
    if (id < getSize()) {
      Path& p = (*path_)[id];
      return p.logProb;
    } else {
      RangeError e;
      throw e;
    }
  }

 private:
  std::shared_ptr<std::vector<Path>> path_;
  std::shared_ptr<std::vector<std::string>> dict_;
};
// Run beam search over inArgs and return caller-owned results.
// NOTE: mutates the caller's internal argument vector in place — every slot
// gets the feedback's sequenceStartPositions, and the feedback Argument
// itself is appended as the last input.
ISequenceResults* SequenceGenerator::generateSequence(
    const Arguments& inArgs) const {
  auto& in_args =
      m->cast<std::vector<paddle::Argument>>(inArgs.getInternalArgumentsPtr());
  for (auto& arg : in_args) {
    arg.sequenceStartPositions = m->feedback.sequenceStartPositions;
  }
  in_args.push_back(m->feedback);
  auto path = std::make_shared<std::vector<Path>>();
  m->findNBest(in_args, *path);
  return new PathSequenceResults(path, m->dict);
}
// ptr must point at a std::shared_ptr<paddle::GradientMachine>.
SequenceGenerator* SequenceGenerator::createByGradientMachineSharedPtr(
    void* ptr) {
  SequenceGenerator* r = new SequenceGenerator();
  r->m->machine = r->m->cast<std::shared_ptr<paddle::GradientMachine>>(ptr);
  return r;
}
// Replace the id->word dictionary used by getSentence().
void SequenceGenerator::setDict(const std::vector<std::string>& dict) {
  *m->dict = dict;
}
void SequenceGenerator::setBos(size_t bos) { m->beginPos = bos; }
void SequenceGenerator::setEos(size_t eos) { m->endPos = eos; }
void SequenceGenerator::setMaxLength(size_t maxLength) {
  m->maxLength = maxLength;
}
// -1UL means "keep the current global beam size"; otherwise the global
// FLAGS_beam_size flag is overwritten.
void SequenceGenerator::setBeamSize(size_t beamSize) {
  if (beamSize != -1UL) {
    FLAGS_beam_size = beamSize;
  }
}
ISequenceResults::~ISequenceResults() {}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "PaddleAPIPrivate.h"
#include <stdlib.h>
#include <atomic>
#include <memory>
#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h"
#include "paddle/legacy/trainer/ParamUtil.h"
#include "paddle/legacy/trainer/Trainer.h"
#include "paddle/legacy/trainer/TrainerInternal.h"
#include "paddle/legacy/utils/Flags.h"
using paddle::real;
DECLARE_string(config);
DECLARE_string(init_model_path);
DECLARE_int32(start_pass);
// Private side of Trainer: derives from paddle::Trainer so the wrapper can
// reach its protected members (data provider, tester, trainer internal...).
struct TrainerPrivate : public paddle::Trainer {
  bool _trainOneBatch(size_t batchSize);
  bool forwardOneBatch(size_t batchSize);
  void forwardOneDataBatch(const std::vector<paddle::Argument>& inArgs);
  void setBatchSize(size_t batchSize);
  std::vector<paddle::Argument>& getForwardOutput();
  void startTestPeriod();
  void finishTestPeriod();
  void testOneDataBatch(const paddle::DataBatch& dataBatch);
  TrainerPrivate() : paddle::Trainer() {}
};
// Default ctor: initialize from command-line flags when they yield a config.
Trainer::Trainer() : m(new TrainerPrivate()) {
  auto conf = paddle::TrainerConfigHelper::createFromFlags();
  if (conf != nullptr) {
    m->init(conf);
  }
}
Trainer::~Trainer() { delete m; }
// Factory: flag-driven construction; throws IOError when the flags did not
// produce a fully initialized config. Caller owns the result.
Trainer* Trainer::createByCommandLine() throw(IOError) {
  auto retv = new Trainer();
  if (retv->m->getConfig().IsInitialized()) {
    return retv;
  } else {
    throw IOError();
  }
}
Trainer::Trainer(TrainerConfig* config, GradientMachine* gm)
    : m(new TrainerPrivate()) {
  m->init(config->m->conf, /* testing= */ false, gm ? gm->m->machine : nullptr);
}
// Factory from an explicit config (and optional gradient machine); logs the
// missing fields via CheckInitialized before throwing IOError.
Trainer* Trainer::create(TrainerConfig* config,
                         GradientMachine* gm) throw(IOError) {
  auto retv = new Trainer(config, gm);
  if (retv->m->getConfig().IsInitialized()) {
    return retv;
  } else {
    retv->m->getConfig().CheckInitialized();
    throw IOError();
  }
}
void Trainer::startTrain() { m->startTrain(); }
void Trainer::finishTrain() { m->finishTrain(); }
void Trainer::startTrainPass() { m->startTrainPass(); }
void Trainer::finishTrainPass() { m->finishTrainPass(); }
// Train on a batch supplied by the caller (as Arguments slots) instead of a
// batch pulled from the configured data provider.
void Trainer::trainOneDataBatch(size_t batchSize, const Arguments& inArgs) {
  paddle::DataBatch dataBatch;
  dataBatch.getStreams() = inArgs.m->outputs;
  dataBatch.setSize(batchSize);
  m->trainOneDataBatch(dataBatch);
}
bool Trainer::trainOneBatch(size_t batchSize) {
  return m->_trainOneBatch(batchSize);
}
// Pull the next batch from the configured data provider and train on it.
// Returns false when the provider is exhausted (nothing was trained) and
// true after successfully training one batch — mirroring forwardOneBatch.
bool TrainerPrivate::_trainOneBatch(size_t batchSize) {
  paddle::DataBatch dataBatch;
  CHECK(dataProvider_) << "data_provider is not specified";
  int num = dataProvider_->getNextBatch(batchSize, &dataBatch);
  if (num == 0) {
    return false;
  }
  trainOneDataBatch(dataBatch);
  // Fixed: this returned false, which made the success case
  // indistinguishable from "no more data" (cf. forwardOneBatch).
  return true;
}
// Lazily create the tester on first use, then begin a test period.
void TrainerPrivate::startTestPeriod() {
  if (!tester_) {
    createTester();
  }
  tester_->startTestPeriod();
}
void Trainer::startTestPeriod() { m->startTestPeriod(); }
void TrainerPrivate::testOneDataBatch(const paddle::DataBatch& dataBatch) {
  tester_->testOneDataBatch(dataBatch, &forwardOutput_);
}
// Evaluate one caller-supplied batch; startTestPeriod() must have been
// called first so that tester_ exists.
void Trainer::testOneDataBatch(size_t batchSize, const Arguments& args) {
  paddle::DataBatch dataBatch;
  dataBatch.getStreams() = args.m->outputs;
  dataBatch.setSize(batchSize);
  m->testOneDataBatch(dataBatch);
}
void TrainerPrivate::finishTestPeriod() { tester_->finishTestPeriod(); }
void Trainer::finishTestPeriod() { m->finishTestPeriod(); }
// Return a caller-owned Arguments wrapper around the named layer's output of
// the trainer's gradient machine.
Arguments* Trainer::getLayerOutput(const std::string& layerName) const {
  auto nn = this->m->getGradientMachine();
  CHECK(nn) << "trainerInternal_.getGradientMachine() is not NeuralNetwork";
  auto arg = nn->getLayerOutput(layerName);
  return Arguments::createByPaddleArgument(&arg);
}
void Trainer::forwardOneBatch(size_t batchSize) {
  m->forwardOneBatch(batchSize);
}
// Pull the next batch from the data provider and run a forward pass only.
// Returns false when the provider is exhausted, true otherwise.
bool TrainerPrivate::forwardOneBatch(size_t batchSize) {
  CHECK(dataProvider_) << "data_provider is not specified";
  paddle::DataBatch dataBatch;
  int num = dataProvider_->getNextBatch(batchSize, &dataBatch);
  if (num == 0) {
    return false;
  }
  forwardOneDataBatch(dataBatch.getStreams());
  return true;
}
// Forward pass over inArgs into forwardOutput_; prefetches remote
// parameters first when sparse remote updating is configured.
void TrainerPrivate::forwardOneDataBatch(
    const std::vector<paddle::Argument>& inArgs) {
  std::vector<paddle::Argument>& outArgs = forwardOutput_;
  if (config_->getOptConfig().use_sparse_remote_updater()) {
    trainerInternal_.getGradientMachine()->prefetch(inArgs);
    trainerInternal_.getParameterUpdater()->getParametersRemote();
  }
  trainerInternal_.getGradientMachine()->forward(
      inArgs, &outArgs, paddle::PASS_TEST);
}
// Caller-owned snapshot wrapper of the last forward pass's outputs.
Arguments* Trainer::getForwardOutput() {
  return Arguments::createByPaddleArgumentVector(&m->getForwardOutput());
}
std::vector<paddle::Argument>& TrainerPrivate::getForwardOutput() {
  return forwardOutput_;
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "paddle/legacy/parameter/Parameter.h"
#include "paddle/legacy/utils/Common.h"
#include "paddle/legacy/utils/Flags.h"
#include "paddle/legacy/utils/PythonUtil.h"
#include "paddle/legacy/utils/Util.h"
#include <algorithm>
#include <iostream>
#include <iterator>
// Global library initialization: main-process setup, embedded Python, and
// hardware floating-point exception trapping (feenableexcept is
// glibc-specific).
void initPaddle(int argc, char** argv) {
  paddle::initMain(argc, argv);
  paddle::initPython(argc, argv);
  feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW);
}
// Thin array views handed across the SWIG boundary; needFree records
// whether the buffer was heap-allocated for the caller and must be freed.
FloatArray::FloatArray(const float* b, const size_t l)
    : buf(b), length(l), needFree(false) {}
IntArray::IntArray(const int* b, const size_t l, bool f)
    : buf(b), length(l), needFree(f) {}
IntWithFloatArray::IntWithFloatArray(const float* v,
                                     const int* i,
                                     size_t l,
                                     bool f)
    : valBuf(v), idxBuf(i), length(l), needFree(f) {}
bool isUsingGpu() { return FLAGS_use_gpu; }
void setUseGpu(bool useGpu) { FLAGS_use_gpu = useGpu; }
// Whether this binary was built with CUDA support (not whether a GPU is
// currently being used).
bool isGpuVersion() {
#ifndef PADDLE_WITH_CUDA
  return false;
#else
  return true;
#endif
}
int getTrainerCount() { return FLAGS_trainer_count; }
// Keep the API-level enum in sync with the core definition.
static_assert(NUM_PARAMETER_TYPES == paddle::NUM_PARAMETER_TYPES,
              "The Parameter Type should be same in core/api and core/common");
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "PaddleAPI.h"
#include "paddle/legacy/math/Vector.h"
#include <cstring>
// Pimpl data for IVector: shared ownership of the underlying int vector.
struct IVectorPrivate {
  paddle::IVectorPtr vec;
};
IVector::IVector() : m(new IVectorPrivate()) {}
// Caller-owned zero-filled vector of sz elements on CPU or GPU.
IVector* IVector::createZero(size_t sz, bool useGpu) {
  auto v = new IVector();
  v->m->vec = paddle::IVector::create(sz, useGpu);
  v->m->vec->zeroMem();
  return v;
}
// Caller-owned vector holding a copy of `data`.
IVector* IVector::create(const std::vector<int>& data, bool useGpu) {
  auto v = new IVector();
  v->m->vec = paddle::IVector::create(data.size(), useGpu);
  v->m->vec->copyFrom(data.data(), data.size());
  return v;
}
// Build an IVector from a numpy buffer. GPU vectors always own a copy;
// requesting a zero-copy GPU view throws UnsupportError.
IVector* IVector::createVectorFromNumpy(int* data,
                                        int dim,
                                        bool copy,
                                        bool useGpu) throw(UnsupportError) {
  if (useGpu) {
    /// if use gpu only copy=true is supported
    if (!copy) {
      throw UnsupportError("Gpu mode only supports copy=True");
    }
    return IVector::createGpuVectorFromNumpy(data, dim);
  } else {
    return IVector::createCpuVectorFromNumpy(data, dim, copy);
  }
}
// CPU variant: either copy the buffer or wrap it in place (copy == false
// means the numpy array must outlive the IVector).
IVector* IVector::createCpuVectorFromNumpy(int* data, int dim, bool copy) {
  auto v = new IVector();
  if (copy) {
    v->m->vec = paddle::IVector::create(dim, false);
    v->m->vec->copyFrom(data, dim);
  } else {
    v->m->vec = paddle::IVector::create(data, dim, false);
  }
  return v;
}
// GPU variant: allocate device memory and copy the host data into it.
IVector* IVector::createGpuVectorFromNumpy(int* data, int dim) {
  auto v = new IVector();
  v->m->vec = paddle::IVector::create(dim, true);
  v->m->vec->copyFrom(data, dim);
  return v;
}
bool IVector::isGpu() const {
  return dynamic_cast<paddle::GpuIVector*>(m->vec.get()) != nullptr;
}
// Return a view of the data. For GPU vectors a host copy is allocated and
// the returned IntArray's needFree flag tells the caller to free it; for
// CPU vectors the array aliases the vector's own storage.
IntArray IVector::getData() const {
  if (this->isGpu()) {
    int* src = m->vec->getData();
    size_t len = m->vec->getSize();
    int* dest = new int[len];
    hl_memcpy_device2host(dest, src, len * sizeof(int));
    return IntArray(dest, len, true);
  } else {
    return IntArray(m->vec->getData(), m->vec->getSize());
  }
}
// Element access. Only valid for CPU vectors (UnsupportError on GPU) and
// in-range indices (RangeError otherwise).
int& IVector::operator[](const size_t idx) throw(RangeError, UnsupportError) {
  if (this->isGpu()) {
    UnsupportError e;
    throw e;
  } else {
    if (idx >= m->vec->getSize()) {
      RangeError e;
      throw e;
    }
  }
  return m->vec->getData()[idx];
}
const int& IVector::operator[](const size_t idx) const
    throw(RangeError, UnsupportError) {
  return (*const_cast<IVector*>(this))[idx];
}
// Wrap an existing paddle::IVectorPtr (shares ownership); a null shared
// pointer yields nullptr.
IVector* IVector::createByPaddleVectorPtr(void* ptr) {
  auto* p = (paddle::IVectorPtr*)ptr;
  if ((*p) != nullptr) {
    IVector* vec = new IVector();
    vec->m->vec = *p;
    return vec;
  } else {
    return nullptr;
  }
}
IVector::~IVector() { delete m; }
// Opaque pointer to the internal paddle::IVectorPtr (for other wrappers).
void* IVector::getSharedPtr() const { return &m->vec; }
size_t IVector::getSize() const { return m->vec->getSize(); }
// Zero-copy numpy view; only possible for CPU-backed vectors.
void IVector::toNumpyArrayInplace(int** data, int* dim1) throw(UnsupportError) {
  auto v = std::dynamic_pointer_cast<paddle::CpuIVector>(m->vec);
  if (v) {
    *data = v->getData();
    *dim1 = v->getSize();
  } else {
    throw UnsupportError();
  }
}
// Copy the vector into a freshly allocated host buffer for numpy to own.
void IVector::copyToNumpyArray(int** view_m_data, int* dim1) {
  *dim1 = m->vec->getSize();
  *view_m_data = new int[*dim1];
  if (auto cpuVec = dynamic_cast<paddle::CpuIVector*>(m->vec.get())) {
    std::memcpy(*view_m_data, cpuVec->getData(), sizeof(int) * (*dim1));
  } else if (auto gpuVec = dynamic_cast<paddle::GpuIVector*>(m->vec.get())) {
    hl_memcpy_device2host(
        *view_m_data, gpuVec->getData(), sizeof(int) * (*dim1));
  } else {
    // NOTE(review): an unknown vector subtype is only logged at INFO and the
    // output buffer is left uninitialized — presumably unreachable; confirm.
    LOG(INFO) << "Unexpected situation";
  }
}
// Resize to dim and copy dim ints from the numpy buffer.
void IVector::copyFromNumpyArray(int* data, int dim) {
  m->vec->resize(dim);
  m->vec->copyFrom(data, dim);
}
// Pimpl data for Vector plus a checked element accessor shared by
// Vector::get() and Vector::set().
struct VectorPrivate {
  paddle::VectorPtr vec;
  // Run func on element idx. Throws UnsupportError for GPU-backed vectors
  // and RangeError for out-of-range indices.
  void safeAccessData(const size_t idx,
                      const std::function<void(float&)>& func) const
      throw(RangeError, UnsupportError) {
    auto cpuVec = std::dynamic_pointer_cast<const paddle::CpuVector>(vec);
    if (cpuVec != nullptr) {
      if (idx < vec->getSize()) {
        func(vec->getData()[idx]);
      } else {
        throw RangeError();
      }
    } else {
      throw UnsupportError();
    }
  }
};
Vector::Vector() : m(new VectorPrivate()) {}
Vector::~Vector() { delete m; }
// Caller-owned zero-filled float vector on CPU or GPU.
Vector* Vector::createZero(size_t sz, bool useGpu) {
  auto retVec = new Vector();
  retVec->m->vec = paddle::Vector::create(sz, useGpu);
  retVec->m->vec->zero();
  return retVec;
}
// Caller-owned vector holding a copy of `data`.
Vector* Vector::create(const std::vector<float>& data, bool useGpu) {
  auto retVec = new Vector();
  retVec->m->vec = paddle::Vector::create(data.size(), useGpu);
  retVec->m->vec->copyFrom(data.data(), data.size());
  return retVec;
}
// Wrap an existing paddle::VectorPtr (shares ownership); a null shared
// pointer yields nullptr.
Vector* Vector::createByPaddleVectorPtr(void* ptr) {
  auto& v = *(paddle::VectorPtr*)(ptr);
  if (v == nullptr) {
    return nullptr;
  } else {
    auto retVec = new Vector();
    retVec->m->vec = v;
    return retVec;
  }
}
// Build a Vector from a numpy buffer. GPU vectors always own a copy;
// requesting a zero-copy GPU view throws UnsupportError.
Vector* Vector::createVectorFromNumpy(float* data,
                                      int dim,
                                      bool copy,
                                      bool useGpu) throw(UnsupportError) {
  if (useGpu) {
    /// if use gpu only copy=True is supported
    if (!copy) {
      throw UnsupportError("Gpu mode only supports copy=True");
    }
    return Vector::createGpuVectorFromNumpy(data, dim);
  } else {
    return Vector::createCpuVectorFromNumpy(data, dim, copy);
  }
}
// CPU variant: copy the buffer, or wrap it in place when copy == false (the
// numpy array must then outlive the Vector).
Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) {
  CHECK_GT(dim, 0);
  auto retVec = new Vector();
  if (copy) {
    retVec->m->vec = paddle::Vector::create((size_t)dim, false);
    retVec->m->vec->copyFrom(data, dim);
  } else {
    retVec->m->vec = paddle::Vector::create(data, (size_t)dim, false);
  }
  return retVec;
}
// GPU variant: allocate device memory and copy the host buffer into it.
Vector* Vector::createGpuVectorFromNumpy(float* data, int dim) {
  CHECK_GT(dim, 0);
  auto retVec = new Vector();
  retVec->m->vec = paddle::Vector::create((size_t)dim, true);
  retVec->m->vec->copyFrom(data, (size_t)dim);
  return retVec;
}
void Vector::toNumpyArrayInplace(float** view_data,
int* dim1) throw(UnsupportError) {
auto v = std::dynamic_pointer_cast<paddle::CpuVector>(m->vec);
if (v != nullptr) {
*view_data = v->getData();
*dim1 = (int)v->getSize();
} else {
throw UnsupportError();
}
}
void Vector::copyToNumpyArray(float** view_m_data, int* dim1) {
*dim1 = m->vec->getSize();
*view_m_data = new float[*dim1];
if (auto cpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
std::memcpy(*view_m_data, cpuVec->getData(), sizeof(float) * (*dim1));
} else if (auto gpuVec = dynamic_cast<paddle::GpuVector*>(m->vec.get())) {
hl_memcpy_device2host(
*view_m_data, gpuVec->getData(), sizeof(float) * (*dim1));
} else {
LOG(INFO) << "Unexpected situation";
}
}
// Resize the underlying vector to `dim` elements and copy that many floats
// from the caller-provided numpy buffer (host->host or host->device,
// depending on where the vector lives).
void Vector::copyFromNumpyArray(float* data, int dim) {
  m->vec->resize(dim);
  m->vec->copyFrom(data, dim);
}
// Return the vector contents as a FloatArray. CPU data is exposed directly
// (no copy, needFree stays false); GPU data is staged into a heap buffer
// marked needFree so the binding layer deletes it after use.
FloatArray Vector::getData() const {
  const size_t len = m->vec->getSize();
  if (!this->isGpu()) {
    return FloatArray(m->vec->getData(), len);
  }
  float* host = new float[len];
  hl_memcpy_device2host(host, m->vec->getData(), len * sizeof(float));
  FloatArray result(host, len);
  result.needFree = true;
  return result;
}
// Element-wise copy from another Vector of identical length; a size
// mismatch raises RangeError instead of silently resizing.
void Vector::copyFrom(Vector* src) throw(RangeError) {
  const size_t expected = m->vec->getSize();
  if (src->m->vec->getSize() != expected) {
    throw RangeError();
  }
  m->vec->copyFrom(*src->m->vec);
}
// A vector is GPU-resident iff its concrete type is paddle::GpuVector.
bool Vector::isGpu() const {
  auto asGpu = std::dynamic_pointer_cast<paddle::GpuVector>(m->vec);
  return static_cast<bool>(asGpu);
}
// Read element `idx`. Bounds and accessibility are enforced inside
// safeAccessData (RangeError for a bad index, UnsupportError when
// element-wise access is not available for this storage).
float Vector::get(const size_t idx) const throw(RangeError, UnsupportError) {
  float r;
  m->safeAccessData(idx, [&](float& o) { r = o; });
  return r;
}
// Write `val` into element `idx`; error behavior mirrors Vector::get.
void Vector::set(const size_t idx, float val) throw(RangeError,
                                                    UnsupportError) {
  m->safeAccessData(idx, [&](float& o) { o = val; });
}
// Number of elements in the underlying paddle vector.
size_t Vector::getSize() const { return m->vec->getSize(); }
// Expose the address of the internal paddle::VectorPtr so other API
// wrappers can share the same storage.
void* Vector::getSharedPtr() { return &m->vec; }
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
This diff has been collapsed.
# Copy every python test script into the build tree so the py_test targets
# below run from CMAKE_CURRENT_BINARY_DIR.
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/testTrain.py
COMMAND cp -r ${CMAKE_CURRENT_SOURCE_DIR}/*.py ${CMAKE_CURRENT_BINARY_DIR}
)
add_custom_target(copy_api_test ALL DEPENDS testTrain.py)
# Register each swig-API python unittest with ctest.
py_test(testTrain SRCS testTrain.py)
py_test(testMatrix SRCS testMatrix.py)
py_test(testVector SRCS testVector.py)
py_test(testTrainer SRCS testTrainer.py)
py_test(testArguments SRCS testArguments.py)
py_test(testGradientMachine SRCS testGradientMachine.py)
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
import numpy as np
import unittest
class TestArguments(unittest.TestCase):
    """Tests for the swig Arguments wrapper: slot values, ids, frame shape."""

    def test_load_arguments(self):
        # Fill slot 0 with a 2x3 dense matrix and check args.sum() over it.
        m = swig_paddle.Matrix.createDense([4, 2, 4, 3, 9, 5], 2, 3)
        args = swig_paddle.Arguments.createArguments(1)
        args.setSlotValue(0, m)
        self.assertAlmostEqual(27.0, args.sum())
        mat = args.getSlotValue(0)
        assert isinstance(mat, swig_paddle.Matrix)
        np_mat = mat.toNumpyMatInplace()
        # The matrix unittest is in testMatrix.py
        self.assertEqual(np_mat.shape, (2, 3))
        # Integer id slots round-trip through IVector.
        args.setSlotIds(0, swig_paddle.IVector.create([1, 2, 3, 4, 5, 6]))
        iv = args.getSlotIds(0)
        assert isinstance(iv, swig_paddle.IVector)
        np_arr = iv.toNumpyArrayInplace()
        self.assertEqual(np_arr.shape, (6, ))

    def test_arguments_shape(self):
        # Frame height/width metadata attached to a slot must read back intact.
        h, w = 4, 6
        v = np.random.rand(2, h * w)
        m = swig_paddle.Matrix.createDense(v.flatten(), 2, h * w)
        args = swig_paddle.Arguments.createArguments(1)
        args.setSlotValue(0, m)
        args.setSlotFrameHeight(0, h)
        args.setSlotFrameWidth(0, w)
        self.assertEqual(args.getSlotFrameHeight(), h)
        self.assertEqual(args.getSlotFrameWidth(), w)
if __name__ == '__main__':
    # Force CPU mode; these tests do not require a GPU build.
    swig_paddle.initPaddle("--use_gpu=0")
    unittest.main()
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
import paddle.proto.ParameterConfig_pb2
import util
import unittest
import numpy
class TestGradientMachine(unittest.TestCase):
    """End-to-end tests for GradientMachine: manual optimizer wiring and
    a one-batch forwardBackward training step."""

    def test_create_gradient_machine(self):
        conf_file_path = "./testTrainConfig.py"
        trainer_config = swig_paddle.TrainerConfig.createFromTrainerConfigFile(
            conf_file_path)
        self.assertIsNotNone(trainer_config)
        opt_config = trainer_config.getOptimizationConfig()
        model_config = trainer_config.getModelConfig()
        self.assertIsNotNone(model_config)
        machine = swig_paddle.GradientMachine.createByModelConfig(
            model_config, swig_paddle.CREATE_MODE_NORMAL,
            swig_paddle.ParameterOptimizer.create(opt_config).getParameterTypes(
            ))
        self.assertIsNotNone(machine)
        ipt, _ = util.loadMNISTTrainData()
        output = swig_paddle.Arguments.createArguments(0)
        optimizers = {}
        # Initial Machine Parameter all to 0.1
        for param in machine.getParameters():
            assert isinstance(param, swig_paddle.Parameter)
            val = param.getBuf(swig_paddle.PARAMETER_VALUE)
            assert isinstance(val, swig_paddle.Vector)
            arr = numpy.full((len(val), ), 0.1, dtype="float32")
            val.copyFromNumpyArray(arr)
            # Persist so the load() round-trip below can be verified.
            self.assertTrue(param.save(param.getName()))
            param_config = param.getConfig().toProto()
            assert isinstance(param_config,
                              paddle.proto.ParameterConfig_pb2.ParameterConfig)
            # One optimizer instance per parameter, keyed by parameter ID.
            opt = swig_paddle.ParameterOptimizer.create(opt_config)
            optimizers[param.getID()] = opt
            num_rows = param_config.dims[1]
            opt.init(num_rows, param.getConfig())
        for k in optimizers:
            opt = optimizers[k]
            opt.startPass()
        batch_size = ipt.getSlotValue(0).getHeight()
        for k in optimizers:
            opt = optimizers[k]
            opt.startBatch(batch_size)
        machine.forward(ipt, output, swig_paddle.PASS_TRAIN)
        self.assertEqual(1, output.getSlotNum())
        self.isCalled = False

        def backward_callback(param_):
            # Invoked once per parameter during backward(); checks the
            # parameter value is still the 0.1 fill, then applies the update.
            self.isCalled = isinstance(param_, swig_paddle.Parameter)
            assert isinstance(param_, swig_paddle.Parameter)
            vec = param_.getBuf(swig_paddle.PARAMETER_VALUE)
            assert isinstance(vec, swig_paddle.Vector)
            vec = vec.copyToNumpyArray()
            for val_ in vec:
                self.assertTrue(
                    util.doubleEqual(val_, 0.1))  # Assert All Value is 0.1
            vecs = list(param_.getBufs())
            opt_ = optimizers[param_.getID()]
            opt_.update(vecs, param_.getConfig())

        machine.backward(backward_callback)
        for k in optimizers:
            opt = optimizers[k]
            opt.finishBatch()
        for k in optimizers:
            opt = optimizers[k]
            opt.finishPass()
        self.assertTrue(self.isCalled)
        # Parameters saved above must load back without error.
        for param in machine.getParameters():
            self.assertTrue(param.load(param.getName()))

    def test_train_one_pass(self):
        # Minimal smoke test: one forwardBackward step through the trainer
        # config, letting the machine manage its own parameter types.
        conf_file_path = './testTrainConfig.py'
        trainer_config = swig_paddle.TrainerConfig.createFromTrainerConfigFile(
            conf_file_path)
        model_config = trainer_config.getModelConfig()
        machine = swig_paddle.GradientMachine.createByModelConfig(model_config)
        at_end = False
        output = swig_paddle.Arguments.createArguments(0)
        if not at_end:
            input_, at_end = util.loadMNISTTrainData(1000)
            machine.forwardBackward(input_, output, swig_paddle.PASS_TRAIN)
if __name__ == '__main__':
    # CPU-only run; the gradient machine tests do not need a GPU.
    swig_paddle.initPaddle('--use_gpu=0')
    unittest.main()
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
import numpy as np
import unittest
class TestMatrix(unittest.TestCase):
    """Tests for the swig Matrix wrapper: dense, sparse, and numpy interop."""

    def test_createZero_get_set(self):
        # Freshly created matrices are zero-filled and element-addressable.
        m = swig_paddle.Matrix.createZero(32, 24)
        self.assertEqual(m.getWidth(), 24)
        self.assertEqual(m.getHeight(), 32)
        for x in xrange(24):
            for y in xrange(32):
                self.assertEqual(0.0, m.get(x, y))
        # Out-of-bounds access must raise RangeError rather than crash.
        with self.assertRaises(swig_paddle.RangeError):
            m.get(51, 47)
        m.set(3, 3, 3.0)
        self.assertEqual(m.get(3, 3), 3.0)

    def test_sparse(self):
        # Non-value CSR matrix: only the row/col structure is stored.
        m = swig_paddle.Matrix.createSparse(3, 3, 6, True, False, False)
        self.assertIsNotNone(m)
        self.assertTrue(m.isSparse())
        self.assertEqual(m.getSparseValueType(), swig_paddle.SPARSE_NON_VALUE)
        self.assertEqual(m.getSparseFormat(), swig_paddle.SPARSE_CSR)
        # sparseCopyFrom takes CSR row offsets, column indices, values.
        m.sparseCopyFrom([0, 2, 3, 3], [0, 1, 2], [])
        self.assertEqual(m.getSparseRowCols(0), [0, 1])
        self.assertEqual(m.getSparseRowCols(1), [2])
        self.assertEqual(m.getSparseRowCols(2), [])

    def test_sparse_value(self):
        # Value-carrying sparse matrix yields (col, value) pairs per row.
        m = swig_paddle.Matrix.createSparse(3, 3, 6, False, False, False)
        self.assertIsNotNone(m)
        m.sparseCopyFrom([0, 2, 3, 3], [0, 1, 2], [7.3, 4.2, 3.2])

        def assertKVArraySame(actual, expect):
            # Compare (key, value) tuples with a float tolerance on values.
            self.assertEqual(len(actual), len(expect))
            for i in xrange(len(actual)):
                a = actual[i]
                e = expect[i]
                self.assertIsInstance(a, tuple)
                self.assertIsInstance(e, tuple)
                self.assertEqual(len(a), 2)
                self.assertEqual(len(e), 2)
                self.assertEqual(a[0], e[0])
                self.assertTrue(abs(a[1] - e[1]) < 1e-5)

        first_row = m.getSparseRowColsVal(0)
        assertKVArraySame(first_row, [(0, 7.3), (1, 4.2)])

    def test_createDenseMat(self):
        # Row-major layout: element (1, 1) of a 2x3 matrix is the 5th value.
        m = swig_paddle.Matrix.createDense([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], 2, 3)
        self.assertIsNotNone(m)
        self.assertTrue(abs(m.get(1, 1) - 0.5) < 1e-5)

    def test_numpyCpu(self):
        numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
        m = swig_paddle.Matrix.createCpuDenseFromNumpy(numpy_mat, False)
        self.assertEqual((int(m.getHeight()), int(m.getWidth())),
                         numpy_mat.shape)
        # the numpy matrix and paddle matrix shared the same memory.
        numpy_mat[0, 1] = 342.23
        for h in xrange(m.getHeight()):
            for w in xrange(m.getWidth()):
                self.assertEqual(m.get(h, w), numpy_mat[h, w])
        # Writes through the in-place view are visible in both directions.
        mat2 = m.toNumpyMatInplace()
        mat2[1, 1] = 32.2
        self.assertTrue(np.array_equal(mat2, numpy_mat))

    def test_numpyGpu(self):
        # Only meaningful on a GPU build; silently skipped otherwise.
        if swig_paddle.isGpuVersion():
            numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype='float32')
            gpu_m = swig_paddle.Matrix.createGpuDenseFromNumpy(numpy_mat)
            assert isinstance(gpu_m, swig_paddle.Matrix)
            self.assertEqual((int(gpu_m.getHeight()), int(gpu_m.getWidth())),
                             numpy_mat.shape)
            self.assertTrue(gpu_m.isGpu())
            # copyToNumpyMat returns a host copy; mutating it must not
            # change device data until copyFromNumpyMat writes it back.
            numpy_mat = gpu_m.copyToNumpyMat()
            numpy_mat[0, 1] = 3.23
            for a, e in zip(gpu_m.getData(), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]):
                self.assertAlmostEqual(a, e)
            gpu_m.copyFromNumpyMat(numpy_mat)
            for a, e in zip(gpu_m.getData(), [1.0, 3.23, 3.0, 4.0, 5.0, 6.0]):
                self.assertAlmostEqual(a, e)

    def test_numpy(self):
        # createDenseFromNumpy places data per the global GPU flag.
        numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32")
        m = swig_paddle.Matrix.createDenseFromNumpy(numpy_mat)
        self.assertEqual((int(m.getHeight()), int(m.getWidth())),
                         numpy_mat.shape)
        self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
        for a, e in zip(m.getData(), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]):
            self.assertAlmostEqual(a, e)
if __name__ == "__main__":
    # Run the suite once on CPU; on GPU builds, switch devices and rerun.
    swig_paddle.initPaddle("--use_gpu=0")
    suite = unittest.TestLoader().loadTestsFromTestCase(TestMatrix)
    unittest.TextTestRunner().run(suite)
    if swig_paddle.isGpuVersion():
        swig_paddle.setUseGpu(True)
        unittest.main()
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
import paddle.trainer.config_parser
import numpy
import util
def init_params(params):
    """Randomize every parameter's value buffer uniformly in [-1.0, 1.0)."""

    def init_param(p):
        assert isinstance(p, swig_paddle.Parameter)
        val = p.getBuf(swig_paddle.PARAMETER_VALUE)
        assert isinstance(val, swig_paddle.Vector)
        # In-place numpy view: writing arr mutates the parameter directly.
        arr = val.toNumpyArrayInplace()
        for i in xrange(len(arr)):
            arr[i] = numpy.random.uniform(-1.0, 1.0)

    for p in params:
        init_param(p)
def init_optimizers(opt_conf, params):
    """Create one ParameterOptimizer per parameter.

    Returns a list where index i holds the optimizer for the parameter with
    getID() == i (parameter IDs are assumed dense in 0..len-1, enforced by
    the assert below).
    """
    opts = {}
    for param in params:
        param_conf = param.getConfig().toProto()
        opts[param.getID()] = swig_paddle.ParameterOptimizer.create(opt_conf)
        opts[param.getID()].init(param_conf.dims[1], param.getConfig())
    retv_opts = [None for _ in xrange(len(opts))]
    for k in opts:
        assert k < len(retv_opts)
        retv_opts[k] = opts[k]
    return retv_opts
def main():
    """Train the testTrainConfig model for one pass via the raw swig API,
    driving optimizers and the backward update callback by hand."""
    trainer_config = paddle.trainer.config_parser.parse_config(
        "./testTrainConfig.py", "")
    opt_config = trainer_config.opt_config
    print "========Optimization Config ======="
    print opt_config
    print "==================================="
    opt_config = swig_paddle.OptimizationConfig.createFromProto(opt_config)
    # A throwaway optimizer is only needed to query the parameter types
    # the gradient machine must allocate.
    _temp_optimizer_ = swig_paddle.ParameterOptimizer.create(opt_config)
    enable_types = _temp_optimizer_.getParameterTypes()
    m = swig_paddle.GradientMachine.createFromConfigProto(
        trainer_config.model_config, swig_paddle.CREATE_MODE_NORMAL,
        enable_types)
    assert m is not None
    assert isinstance(m, swig_paddle.GradientMachine)
    init_params(m.getParameters())
    optimizers = init_optimizers(opt_config, m.getParameters())
    # Train One Pass.
    for optimizer in optimizers:
        optimizer.startPass()
    batch_id = 0
    while True:  # Train one batch
        batch_size = 1000
        inArgs, atEnd = util.loadMNISTTrainData(batch_size)
        if atEnd:
            break
        outArgs = swig_paddle.Arguments.createArguments(0)
        for optimizer in optimizers:
            optimizer.startBatch(batch_size)

        def update_callback(param):
            # Called by forwardBackward once per parameter with gradients
            # ready; applies the optimizer update (and any sparse traversal).
            try:
                bufs = list(param.getBufs())
                opt = optimizers[param.getID()]
                opt.update(bufs, param.getConfig())
                callback = opt.needSpecialTraversal(param.getConfig())
                if callback is not None:
                    callback(bufs, param.getConfig(), swig_paddle.NO_SPARSE_ID)
            except Exception as e:
                print e

        ev = m.makeEvaluator()
        ev.start()
        m.forwardBackward(inArgs, outArgs, swig_paddle.PASS_TRAIN,
                          update_callback)
        m.eval(ev)
        ev.finish()
        for name in ev.getNames():
            print name, ev.getValue(name)
        for optimizer in optimizers:
            optimizer.finishBatch()
        # Slot 0 of the output holds the per-sample cost column.
        cost_vec = outArgs.getSlotValue(0)
        assert isinstance(cost_vec, swig_paddle.Matrix)
        cost_vec = cost_vec.copyToNumpyMat()
        print 'Finish Batch', batch_id, 'with cost ', cost_vec.sum(
        ) / batch_size
        batch_id += 1
    for optimizer in optimizers:
        optimizer.finishPass()
if __name__ == '__main__':
    # Single-threaded CPU training keeps the test deterministic and cheap.
    swig_paddle.initPaddle("--use_gpu=0", "--trainer_count=1")
    main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
# Minimal MNIST-like MLP used by the API unittests: 784 -> 100 -> 100 -> 10
# with a softmax classifier and Adam optimization.
settings(batch_size=100, learning_method=AdamOptimizer())
din = data_layer(name='input', size=784)
fc1 = fc_layer(name='hidden1', input=din, size=100)
fc2 = fc_layer(name='hidden2', input=fc1, size=100)
opt = fc_layer(input=fc2, size=10, act=SoftmaxActivation())
outputs(classification_cost(input=opt, label=data_layer('lbl', 10)))
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.config_parser import parse_config
from paddle.trainer.config_parser import logger
from py_paddle import swig_paddle
import util
def main():
    """Run two passes of train + test-period through the swig Trainer,
    logging the average cost per pass."""
    trainer_config = parse_config("./testTrainConfig.py", "")
    model = swig_paddle.GradientMachine.createFromConfigProto(
        trainer_config.model_config)
    trainer = swig_paddle.Trainer.create(trainer_config, model)
    trainer.startTrain()
    for train_pass in xrange(2):
        trainer.startTrainPass()
        num = 0
        cost = 0
        while True:  # Train one batch
            batch_size = 1000
            data, atEnd = util.loadMNISTTrainData(batch_size)
            if atEnd:
                break
            trainer.trainOneDataBatch(batch_size, data)
            # Output slot 0 carries per-sample cost values.
            outs = trainer.getForwardOutput()
            cost += sum(outs[0]['value'])
            num += batch_size
        trainer.finishTrainPass()
        logger.info('train cost=%f' % (cost / num))
        trainer.startTestPeriod()
        num = 0
        cost = 0
        while True:  # Test one batch
            batch_size = 1000
            data, atEnd = util.loadMNISTTrainData(batch_size)
            if atEnd:
                break
            trainer.testOneDataBatch(batch_size, data)
            outs = trainer.getForwardOutput()
            cost += sum(outs[0]['value'])
            num += batch_size
        trainer.finishTestPeriod()
        logger.info('test cost=%f' % (cost / num))
    trainer.finishTrain()
if __name__ == '__main__':
    # CPU, single trainer thread: deterministic and CI-friendly.
    swig_paddle.initPaddle("--use_gpu=0", "--trainer_count=1")
    main()
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py_paddle import swig_paddle
import util
import numpy as np
import unittest
class TestIVector(unittest.TestCase):
    """Tests for the integer vector wrapper (IVector)."""

    def test_createZero(self):
        # Explicit CPU zero vector is mutable element-wise.
        m = swig_paddle.IVector.createZero(10, False)
        self.assertIsNotNone(m)
        for i in xrange(10):
            self.assertEqual(m[i], 0)
            m[i] = i
            self.assertEqual(m[i], i)
        # Without the flag, placement follows the global GPU setting.
        m = swig_paddle.IVector.createZero(10)
        self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
        self.assertEqual(m.getData(), [0] * 10)

    def test_create(self):
        m = swig_paddle.IVector.create(range(10), False)
        self.assertIsNotNone(m)
        for i in xrange(10):
            self.assertEqual(m[i], i)
        m = swig_paddle.IVector.create(range(10))
        self.assertEqual(m.isGpu(), swig_paddle.isUsingGpu())
        self.assertEqual(m.getData(), range(10))

    def test_cpu_numpy(self):
        # copy=False shares memory with the numpy array in both directions.
        vec = np.array([1, 3, 4, 65, 78, 1, 4], dtype="int32")
        iv = swig_paddle.IVector.createCpuVectorFromNumpy(vec, False)
        self.assertEqual(vec.shape[0], int(iv.__len__()))
        vec[4] = 832
        for i in xrange(len(iv)):
            self.assertEqual(vec[i], iv[i])
        # The in-place view aliases the very same buffer.
        vec2 = iv.toNumpyArrayInplace()
        vec2[1] = 384
        for i in xrange(len(iv)):
            self.assertEqual(vec[i], iv[i])
            self.assertEqual(vec2[i], iv[i])

    def test_gpu_numpy(self):
        # GPU vectors always copy: editing the host array must not leak to
        # device memory until copyFromNumpyArray pushes it back.
        if swig_paddle.isGpuVersion():
            vec = swig_paddle.IVector.create(range(0, 10), True)
            assert isinstance(vec, swig_paddle.IVector)
            self.assertTrue(vec.isGpu())
            self.assertEqual(vec.getData(), range(0, 10))
            num_arr = vec.copyToNumpyArray()
            assert isinstance(num_arr, np.ndarray)  # for code hint.
            num_arr[4] = 7
            self.assertEquals(vec.getData(), range(0, 10))
            vec.copyFromNumpyArray(num_arr)
            expect_vec = range(0, 10)
            expect_vec[4] = 7
            self.assertEqual(vec.getData(), expect_vec)

    def test_numpy(self):
        # createVectorFromNumpy places data per the global GPU flag.
        vec = np.array([1, 3, 4, 65, 78, 1, 4], dtype="int32")
        iv = swig_paddle.IVector.createVectorFromNumpy(vec)
        self.assertEqual(iv.isGpu(), swig_paddle.isUsingGpu())
        self.assertEqual(iv.getData(), list(vec))
class TestVector(unittest.TestCase):
    """Tests for the float vector wrapper (Vector)."""

    def testCreateZero(self):
        v = swig_paddle.Vector.createZero(10, False)
        self.assertIsNotNone(v)
        for i in xrange(len(v)):
            self.assertTrue(util.doubleEqual(v[i], 0))
            v[i] = i
            self.assertTrue(util.doubleEqual(v[i], i))
        # Without the flag, placement follows the global GPU setting.
        v = swig_paddle.Vector.createZero(10)
        self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu())
        self.assertEqual(v.getData(), [0] * 10)

    def testCreate(self):
        v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)], False)
        self.assertIsNotNone(v)
        for i in xrange(len(v)):
            self.assertTrue(util.doubleEqual(v[i], i / 100.0))
        self.assertEqual(100, len(v))
        v = swig_paddle.Vector.create([x / 100.0 for x in xrange(100)])
        self.assertEqual(v.isGpu(), swig_paddle.isUsingGpu())
        self.assertEqual(100, len(v))
        vdata = v.getData()
        for i in xrange(len(v)):
            self.assertTrue(util.doubleEqual(vdata[i], i / 100.0))

    def testCpuNumpy(self):
        # copy=False shares memory: numpy writes show up in the Vector and
        # vice versa; copyToNumpyArray yields an independent buffer.
        numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32")
        vec = swig_paddle.Vector.createCpuVectorFromNumpy(numpy_arr, False)
        assert isinstance(vec, swig_paddle.Vector)
        numpy_arr[0] = 0.1
        for n, v in zip(numpy_arr, vec):
            self.assertTrue(util.doubleEqual(n, v))
        numpy_2 = vec.toNumpyArrayInplace()
        vec[0] = 1.3
        for x, y in zip(numpy_arr, numpy_2):
            self.assertTrue(util.doubleEqual(x, y))
        for x, y in zip(numpy_arr, vec):
            self.assertTrue(util.doubleEqual(x, y))
        numpy_3 = vec.copyToNumpyArray()
        numpy_3[0] = 0.4
        self.assertTrue(util.doubleEqual(vec[0], 1.3))
        self.assertTrue(util.doubleEqual(numpy_3[0], 0.4))
        for i in xrange(1, len(numpy_3)):
            util.doubleEqual(numpy_3[i], vec[i])

    def testNumpy(self):
        numpy_arr = np.array([1.2, 2.3, 3.4, 4.5], dtype="float32")
        vec = swig_paddle.Vector.createVectorFromNumpy(numpy_arr)
        self.assertEqual(vec.isGpu(), swig_paddle.isUsingGpu())
        vecData = vec.getData()
        for n, v in zip(numpy_arr, vecData):
            self.assertTrue(util.doubleEqual(n, v))

    def testCopyFromNumpy(self):
        # copyFromNumpyArray resizes the vector to match the source array.
        vec = swig_paddle.Vector.createZero(1, False)
        arr = np.array([1.3, 3.2, 2.4], dtype="float32")
        vec.copyFromNumpyArray(arr)
        for i in xrange(len(vec)):
            self.assertTrue(util.doubleEqual(vec[i], arr[i]))
if __name__ == '__main__':
    # Run TestVector on CPU; on GPU builds, switch devices and rerun all
    # tests (NOTE(review): TestIVector only runs in the GPU branch here --
    # confirm that is intentional).
    swig_paddle.initPaddle("--use_gpu=0")
    suite = unittest.TestLoader().loadTestsFromTestCase(TestVector)
    unittest.TextTestRunner().run(suite)
    if swig_paddle.isGpuVersion():
        swig_paddle.setUseGpu(True)
        unittest.main()
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
from py_paddle import swig_paddle
def doubleEqual(a, b):
    """Return True when a and b differ by less than 1e-5 (float tolerance)."""
    diff = a - b
    return -1e-5 < diff < 1e-5
def __readFromFile():
    """Yield 10002 synthetic (784-dim sample, label) pairs mimicking MNIST.

    Each sample is uniform noise shifted by 0.1 * label so the data is
    weakly learnable; labels are drawn from 0..8 (randint upper bound is
    exclusive).
    """
    for i in xrange(10002):
        label = np.random.randint(0, 9)
        sample = np.random.rand(784) + 0.1 * label
        yield sample, label
def loadMNISTTrainData(batch_size=100):
    """Return (Arguments, atEnd) holding up to batch_size synthetic samples.

    The generator is cached on the function object so successive calls
    continue the same epoch; atEnd becomes True (and the cache is dropped)
    once the generator is exhausted.

    Slot 0: dense batch_size x 784 float matrix; slot 1: integer labels.
    NOTE(review): on the final partial batch the dense matrix is still
    created with the full batch_size row count -- confirm callers tolerate
    the trailing unfilled rows.
    """
    if not hasattr(loadMNISTTrainData, "gen"):
        generator = __readFromFile()
        loadMNISTTrainData.gen = generator
    else:
        generator = loadMNISTTrainData.gen
    args = swig_paddle.Arguments.createArguments(2)
    # batch_size = 100
    dense_slot = []
    id_slot = []
    atEnd = False
    for _ in xrange(batch_size):
        try:
            result = generator.next()
            dense_slot.extend(result[0])
            id_slot.append(result[1])
        except StopIteration:
            atEnd = True
            # Drop the cache so the next call starts a fresh epoch.
            del loadMNISTTrainData.gen
            break
    dense_slot = swig_paddle.Matrix.createDense(dense_slot, batch_size, 784)
    id_slot = swig_paddle.IVector.create(id_slot)
    args.setSlotValue(0, dense_slot)
    args.setSlotIds(1, id_slot)
    return args, atEnd
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "arguments.h"
#include "capi_private.h"
using paddle::capi::cast;
#define castArg(v) cast<paddle::capi::CArguments>(v)
#define castIVec(v) cast<paddle::capi::CIVector>(v)
extern "C" {
// Allocate an empty argument pack; the caller releases it with
// paddle_arguments_destroy.
paddle_arguments paddle_arguments_create_none() {
  auto* holder = new paddle::capi::CArguments();
  return holder;
}
// Release an argument pack previously returned by a create call.
paddle_error paddle_arguments_destroy(paddle_arguments args) {
  if (args == nullptr) return kPD_NULLPTR;
  auto* holder = castArg(args);
  delete holder;
  return kPD_NO_ERROR;
}
// Report the number of argument slots into *size.
paddle_error paddle_arguments_get_size(paddle_arguments args, uint64_t* size) {
  if (args == nullptr) return kPD_NULLPTR;
  if (size == nullptr) return kPD_NULLPTR;
  *size = castArg(args)->args.size();
  return kPD_NO_ERROR;
}
// Grow or shrink the slot list to exactly `size` entries.
paddle_error paddle_arguments_resize(paddle_arguments args, uint64_t size) {
  if (args == nullptr) return kPD_NULLPTR;
  auto* holder = castArg(args);
  holder->args.resize(size);
  return kPD_NO_ERROR;
}
// Attach the matrix handle's data as the value of slot `ID`. The matrix
// must already hold data (an empty create_none handle is rejected).
paddle_error paddle_arguments_set_value(paddle_arguments args,
                                        uint64_t ID,
                                        paddle_matrix mat) {
  if (args == nullptr || mat == nullptr) return kPD_NULLPTR;
  auto* matrix = paddle::capi::cast<paddle::capi::CMatrix>(mat);
  if (matrix->mat == nullptr) return kPD_NULLPTR;
  auto* holder = castArg(args);
  if (ID >= holder->args.size()) return kPD_OUT_OF_RANGE;
  holder->args[ID].value = matrix->mat;
  return kPD_NO_ERROR;
}
// Hand the value matrix of slot `ID` back through the user's matrix handle.
// Unlike set_value, the target handle may start out empty.
paddle_error paddle_arguments_get_value(paddle_arguments args,
                                        uint64_t ID,
                                        paddle_matrix mat) {
  if (args == nullptr || mat == nullptr) return kPD_NULLPTR;
  auto* holder = castArg(args);
  if (ID >= holder->args.size()) return kPD_OUT_OF_RANGE;
  auto* matrix = paddle::capi::cast<paddle::capi::CMatrix>(mat);
  matrix->mat = holder->args[ID].value;
  return kPD_NO_ERROR;
}
// Share slot `ID`'s `in` matrix through the user's handle.
// NOTE(review): the API name says "prob" but the field read is
// Argument::in -- confirm the mapping against the public header docs.
PD_API paddle_error paddle_arguments_get_prob(paddle_arguments args,
                                              uint64_t ID,
                                              paddle_matrix mat) {
  if (args == nullptr || mat == nullptr) return kPD_NULLPTR;
  auto* holder = castArg(args);
  if (ID >= holder->args.size()) return kPD_OUT_OF_RANGE;
  auto* matrix = paddle::capi::cast<paddle::capi::CMatrix>(mat);
  matrix->mat = holder->args[ID].in;
  return kPD_NO_ERROR;
}
// Share slot `ID`'s integer id vector through the user's ivector handle.
paddle_error paddle_arguments_get_ids(paddle_arguments args,
                                      uint64_t ID,
                                      paddle_ivector ids) {
  if (args == nullptr || ids == nullptr) return kPD_NULLPTR;
  auto* holder = castArg(args);
  if (ID >= holder->args.size()) return kPD_OUT_OF_RANGE;
  auto* ivec = castIVec(ids);
  ivec->vec = holder->args[ID].ids;
  return kPD_NO_ERROR;
}
// Attach the ivector handle's data as the ids of slot `ID`; an empty
// handle is rejected.
//! TODO(lizhao): Complete this method.
paddle_error paddle_arguments_set_ids(paddle_arguments args,
                                      uint64_t ID,
                                      paddle_ivector ids) {
  if (args == nullptr || ids == nullptr) return kPD_NULLPTR;
  auto* ivec = castIVec(ids);
  if (ivec->vec == nullptr) return kPD_NULLPTR;
  auto* holder = castArg(args);
  if (ID >= holder->args.size()) return kPD_OUT_OF_RANGE;
  holder->args[ID].ids = ivec->vec;
  return kPD_NO_ERROR;
}
// Record the 2-D frame dimensions (height x width) on slot `ID`.
paddle_error paddle_arguments_set_frame_shape(paddle_arguments args,
                                              uint64_t ID,
                                              uint64_t frameHeight,
                                              uint64_t frameWidth) {
  if (args == nullptr) return kPD_NULLPTR;
  auto* holder = castArg(args);
  if (ID >= holder->args.size()) return kPD_OUT_OF_RANGE;
  auto& slot = holder->args[ID];
  slot.setFrameHeight(frameHeight);
  slot.setFrameWidth(frameWidth);
  return kPD_NO_ERROR;
}
// Install the sequence start-position vector for slot `ID` at the given
// nesting level, wrapping the caller's CPU ivector into a fresh
// ICpuGpuVector. Range/level validation happens inside accessSeqPos.
paddle_error paddle_arguments_set_sequence_start_pos(paddle_arguments args,
                                                     uint64_t ID,
                                                     uint32_t nestedLevel,
                                                     paddle_ivector seqPos) {
  if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR;
  auto iv = paddle::capi::cast<paddle::capi::CIVector>(seqPos);
  if (iv->vec == nullptr) return kPD_NULLPTR;
  auto a = castArg(args);
  return a->accessSeqPos(ID, nestedLevel, [&iv](paddle::ICpuGpuVectorPtr& ptr) {
    ptr = std::make_shared<paddle::ICpuGpuVector>(iv->vec);
  });
}
// Read back the sequence start-position vector of slot `ID` at the given
// nesting level; the handle receives the CPU-side view
// (getMutableVector(false)) of the stored ICpuGpuVector.
paddle_error paddle_arguments_get_sequence_start_pos(paddle_arguments args,
                                                     uint64_t ID,
                                                     uint32_t nestedLevel,
                                                     paddle_ivector seqPos) {
  if (args == nullptr || seqPos == nullptr) return kPD_NULLPTR;
  auto iv = paddle::capi::cast<paddle::capi::CIVector>(seqPos);
  auto a = castArg(args);
  return a->accessSeqPos(ID, nestedLevel, [&iv](paddle::ICpuGpuVectorPtr& ptr) {
    iv->vec = ptr->getMutableVector(false);
  });
}
}
# Select the real type matching the library build (WITH_DOUBLE builds
# paddle with double-precision floats); substituted into config.h below.
if (WITH_DOUBLE)
set(PADDLE_FLOAT_TYPE double)
else ()
set(PADDLE_FLOAT_TYPE float)
endif()
# Capture the current git commit hash for config.h; fall back to a
# placeholder when the tree is not a git checkout.
execute_process(
  COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1
  WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
  OUTPUT_VARIABLE PADDLE_GIT_COMMIT
  RESULT_VARIABLE PADDLE_GIT_COMMIT_RESULT
  ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT PADDLE_GIT_COMMIT)
set(PADDLE_GIT_COMMIT "no commit information")
endif()
# config.h used for C-API. It will store Paddle building configuration as a
# header. Make user just include PaddleCAPI.h then can get building
# configuration without explicitly set -DPADDLE_WITH_DOUBLE when building their
# libraries.
configure_file(config.h.in config.h @ONLY)
# PaddleCAPI.h is the only header we exposed. It currently only used for model
# inference.
file(GLOB CAPI_HEADERS *.h)
set(CAPI_PRIVATE_HEADER capi_private.h)
list(REMOVE_ITEM CAPI_HEADERS ${CAPI_PRIVATE_HEADER})
file(GLOB CAPI_SOURCES *.cpp)
# building paddle_capi
add_library(paddle_capi STATIC ${CAPI_HEADERS} ${CAPI_PRIVATE_HEADER}
${CAPI_SOURCES})
# Generated config.h lives in the binary dir, so expose it to dependents.
target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
add_dependencies(paddle_capi paddle_proto paddle_gserver)
# TODO: paddle_capi_whole will be removed.
set(PADDLE_CAPI_LAYERS_LIBS
paddle_function
paddle_gserver)
# Mobile builds drop the pserver/network (distributed training) libs.
if(MOBILE_INFERENCE)
set(PADDLE_CAPI_ENGINE_LIBS
paddle_utils
paddle_parameter
paddle_math
paddle_cuda
paddle_proto)
else()
set(PADDLE_CAPI_ENGINE_LIBS
paddle_utils
paddle_parameter
paddle_math
paddle_cuda
paddle_proto
paddle_pserver
paddle_network)
endif()
set(PADDLE_CAPI_INFER_LIBS ${PADDLE_CAPI_LAYERS_LIBS} ${PADDLE_CAPI_ENGINE_LIBS})
cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS})
# Link the static library for inference
cc_library(paddle_capi_engine DEPS paddle_capi ${PADDLE_CAPI_ENGINE_LIBS})
cc_library(paddle_capi_layers DEPS ${PADDLE_CAPI_LAYERS_LIBS})
# Link the shared library for inference
if(NOT IOS)
# The version script limits exported symbols to the public C API.
set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_capi.map")
add_library(paddle_capi_shared SHARED ${CAPI_SOURCES})
set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
link_paddle_exe(paddle_capi_shared)
endif()
# install library & headers.
install(FILES ${CAPI_HEADERS} DESTINATION include/paddle)
install(FILES paddle_capi.map DESTINATION include/paddle)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle)
if(ANDROID)
  install(TARGETS paddle_capi_whole paddle_capi_engine paddle_capi_layers paddle_capi_shared
          ARCHIVE DESTINATION lib/${ANDROID_ABI}
          LIBRARY DESTINATION lib/${ANDROID_ABI})
  # Record the latest commit for the BUILD.txt stamp written below.
  execute_process(
    COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -1
    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
    OUTPUT_VARIABLE GIT_COMMITS_LIST
    RESULT_VARIABLE GIT_COMMITS_LIST_RESULT
    ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
  if(${GIT_COMMITS_LIST_RESULT})
    set(GIT_COMMITS_LIST "No commits.")
  endif()
  # Write a build-provenance file next to the installed libraries.
  # BUGFIX: the C flags variable is CMAKE_C_FLAGS (CMAKE_F_FLAGS does not
  # exist, so the C flags line was always empty), and "Latest" was
  # misspelled "Lastest" in the generated file.
  install(CODE "FILE(WRITE ${CMAKE_INSTALL_PREFIX}/lib/${ANDROID_ABI}/BUILD.txt
    \"Compiler:\n\"
    \"\\t${CMAKE_C_COMPILER}\\n\"
    \"\\t${CMAKE_CXX_COMPILER}\\n\"
    \"Compiler Flags:\\n\"
    \"\\t${CMAKE_C_FLAGS}\\n\"
    \"\\t${CMAKE_CXX_FLAGS}\\n\"
    \"Android API: ${CMAKE_SYSTEM_VERSION}\\n\"
    \"Latest commit:\\n\"
    \"\\t${GIT_COMMITS_LIST}\\n\"
    )"
  )
else(ANDROID)
  install(TARGETS paddle_capi_whole paddle_capi_engine paddle_capi_layers ARCHIVE DESTINATION lib)
  if(NOT IOS)
    install(TARGETS paddle_capi_shared DESTINATION lib)
  endif()
endif(ANDROID)
# this variable used for unittest
# (include paths for both the generated config.h and the public headers)
set(PADDLE_CAPI_INC_PATH
${CMAKE_CURRENT_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR})
if (WITH_TESTING)
add_subdirectory(tests)
endif()
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <fenv.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include "capi_private.h"
#include "main.h"
#include "paddle/legacy/trainer/TrainerConfigHelper.h"
#include "paddle/legacy/utils/Excepts.h"
#include "paddle/legacy/utils/PythonUtil.h"
// One-shot bootstrap helper: initialize the Paddle runtime (flags, logging,
// etc. via initMain) and the embedded Python interpreter (initPython) with
// the same argument vector.
static void initPaddle(int argc, char** argv) {
  paddle::initMain(argc, argv);
  paddle::initPython(argc, argv);
}
extern "C" {
// Initialize the Paddle runtime exactly once; repeated calls are no-ops.
// A dummy program name is prepended because the caller's argv does not
// carry argv[0] while the underlying initializers expect one.
paddle_error paddle_init(int argc, char** argv) {
  static bool initialized = false;
  if (initialized) return kPD_NO_ERROR;

  std::vector<char*> fullArgv;
  fullArgv.reserve(argc + 1);
  fullArgv.push_back(strdup(""));  // placeholder argv[0], owned locally
  for (int idx = 0; idx < argc; ++idx) {
    fullArgv.push_back(argv[idx]);  // borrowed from the caller
  }

  initPaddle(argc + 1, fullArgv.data());
  free(fullArgv[0]);  // only the strdup'ed placeholder is ours to release
  initialized = true;
  return kPD_NO_ERROR;
}
// Per-thread initialization: when GPU execution is enabled, bind the calling
// thread to the configured device via hl_init.  CPU-only runs need no work.
paddle_error paddle_init_thread() {
  if (FLAGS_use_gpu) {
    hl_init(FLAGS_gpu_id);
  }
  return kPD_NO_ERROR;
}
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "capi_private.h"
#include "hl_cuda.h"
#include "matrix.h"
#define cast(v) paddle::capi::cast<paddle::capi::CMatrix>(v)
extern "C" {
// Allocate a dense height x width matrix (non-transposed), on the GPU when
// useGpu is true, and return it as an opaque handle.
paddle_matrix paddle_matrix_create(uint64_t height,
                                   uint64_t width,
                                   bool useGpu) {
  auto* wrapper = new paddle::capi::CMatrix();
  wrapper->mat = paddle::Matrix::create(height, width, /*trans=*/false, useGpu);
  return wrapper;
}
// Create an empty wrapper whose underlying matrix is still unset; it is
// typically filled in later (e.g. by the gradient machine).
paddle_matrix paddle_matrix_create_none() {
  auto* wrapper = new paddle::capi::CMatrix();
  return wrapper;
}
// Release a matrix handle.  Deleting the wrapper drops its MatrixPtr, which
// in turn releases the underlying storage.
paddle_error paddle_matrix_destroy(paddle_matrix mat) {
  if (mat == nullptr) return kPD_NULLPTR;
  delete cast(mat);
  return kPD_NO_ERROR;
}
// Copy one row (getWidth() elements) from rowArray into row `rowID`.
// Returns kPD_NULLPTR for null handles/buffers, kPD_OUT_OF_RANGE for a bad
// row index.
paddle_error paddle_matrix_set_row(paddle_matrix mat,
                                   uint64_t rowID,
                                   paddle_real* rowArray) {
  if (mat == nullptr || rowArray == nullptr) return kPD_NULLPTR;
  auto ptr = cast(mat);
  if (ptr->mat == nullptr) return kPD_NULLPTR;
  if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE;
  paddle::real* buf = ptr->mat->getRowBuf(rowID);
  size_t width = ptr->mat->getWidth();
#ifdef PADDLE_WITH_CUDA
  // NOTE(review): unlike paddle_matrix_set_value below, this path does not
  // branch on ptr->mat->useGpu(); in CUDA builds it always goes through
  // hl_memcpy, presumably relying on it to handle host pointers — confirm.
  hl_memcpy(buf, rowArray, sizeof(paddle::real) * width);
#else
  std::copy(rowArray, rowArray + width, buf);
#endif
  return kPD_NO_ERROR;
}
// Fill the whole matrix from `value` (width * height elements).  GPU
// matrices are written via hl_memcpy; in a non-CUDA build a GPU matrix
// cannot be written and kPD_NOT_SUPPORTED is returned.
PD_API paddle_error paddle_matrix_set_value(paddle_matrix mat,
                                            paddle_real* value) {
  if (mat == nullptr || value == nullptr) return kPD_NULLPTR;
  auto ptr = cast(mat);
  if (ptr->mat == nullptr) return kPD_NULLPTR;
  // Row 0's buffer is used as the base of the whole matrix — assumes the
  // rows are stored contiguously; TODO confirm for all Matrix variants.
  paddle::real* buf = ptr->mat->getRowBuf(0);
  size_t width = ptr->mat->getWidth();
  size_t height = ptr->mat->getHeight();
  if (ptr->mat->useGpu()) {
#ifdef PADDLE_WITH_CUDA
    hl_memcpy(buf, value, sizeof(paddle::real) * width * height);
#else
    return kPD_NOT_SUPPORTED;
#endif
  } else {
    std::copy(value, value + width * height, buf);
  }
  return kPD_NO_ERROR;
}
// Copy the whole matrix into `result` (caller-provided buffer of
// width * height elements).  GPU matrices are read via hl_memcpy; in a
// non-CUDA build a GPU matrix cannot be read and kPD_NOT_SUPPORTED is
// returned.
PD_API paddle_error paddle_matrix_get_value(paddle_matrix mat,
                                            paddle_real* result) {
  if (mat == nullptr || result == nullptr) return kPD_NULLPTR;
  auto ptr = cast(mat);
  if (ptr->mat == nullptr) return kPD_NULLPTR;
  // Row 0's buffer is used as the base of the whole matrix — assumes the
  // rows are stored contiguously; TODO confirm for all Matrix variants.
  paddle::real* buf = ptr->mat->getRowBuf(0);
  size_t width = ptr->mat->getWidth();
  size_t height = ptr->mat->getHeight();
  if (ptr->mat->useGpu()) {
#ifdef PADDLE_WITH_CUDA
    hl_memcpy(result, buf, width * height * sizeof(paddle::real));
#else
    return kPD_NOT_SUPPORTED;
#endif
  } else {
    std::copy(buf, buf + width * height, result);
  }
  return kPD_NO_ERROR;
}
// Expose a pointer to the raw buffer of row `rowID` (no copy).  The pointer
// is valid only while the underlying matrix stays alive and unresized.
paddle_error paddle_matrix_get_row(paddle_matrix mat,
                                   uint64_t rowID,
                                   paddle_real** rawRowBuffer) {
  // BUGFIX: also reject a null output pointer; previously a nullptr
  // rawRowBuffer was dereferenced.  This mirrors the null checks performed
  // by the sibling accessors (e.g. paddle_matrix_set_row on rowArray).
  if (mat == nullptr || rawRowBuffer == nullptr) return kPD_NULLPTR;
  auto ptr = cast(mat);
  if (ptr->mat == nullptr) return kPD_NULLPTR;
  if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE;
  *rawRowBuffer = ptr->mat->getRowBuf(rowID);
  return kPD_NO_ERROR;
}
// Report the matrix dimensions.  Either output pointer may be null, in
// which case that dimension is simply not written.
paddle_error paddle_matrix_get_shape(paddle_matrix mat,
                                     uint64_t* height,
                                     uint64_t* width) {
  if (mat == nullptr) return kPD_NULLPTR;
  auto* wrapper = cast(mat);
  if (wrapper->mat == nullptr) return kPD_NULLPTR;
  if (height != nullptr) {
    *height = wrapper->mat->getHeight();
  }
  if (width != nullptr) {
    *width = wrapper->mat->getWidth();
  }
  return kPD_NO_ERROR;
}
}
// Create a sparse matrix in CSR layout with room for `nnz` non-zeros.
// Binary matrices store no values (NO_VALUE); otherwise float values are
// kept (FLOAT_VALUE).
// NOTE(review): on PADDLE_MOBILE_INFERENCE builds this returns nullptr
// instead of an error code — callers must check the handle.
paddle_matrix paddle_matrix_create_sparse(
    uint64_t height, uint64_t width, uint64_t nnz, bool isBinary, bool useGpu) {
#ifndef PADDLE_MOBILE_INFERENCE
  auto ptr = new paddle::capi::CMatrix();
  ptr->mat = paddle::Matrix::createSparseMatrix(
      height,
      width,
      nnz,
      isBinary ? paddle::NO_VALUE : paddle::FLOAT_VALUE,
      paddle::SPARSE_CSR,
      false,
      useGpu);
  return ptr;
#else
  return nullptr;
#endif
}
// Fill a sparse matrix from index/value arrays (presumably CSR-style, per
// the SPARSE_CSR format chosen in paddle_matrix_create_sparse — confirm).
// `valueArray` may be empty (valueSize == 0) for binary matrices.
// Only CPU sparse matrices are supported; anything else (dense, GPU)
// yields kPD_NOT_SUPPORTED.
paddle_error paddle_matrix_sparse_copy_from(paddle_matrix mat,
                                            int* rowArray,
                                            uint64_t rowSize,
                                            int* colArray,
                                            uint64_t colSize,
                                            float* valueArray,
                                            uint64_t valueSize) {
#ifndef PADDLE_MOBILE_INFERENCE
  if (mat == nullptr) return kPD_NULLPTR;
  auto ptr = cast(mat);
  // valueArray may legitimately be null only when valueSize is zero.
  if (rowArray == nullptr || colArray == nullptr ||
      (valueSize != 0 && valueArray == nullptr) || ptr->mat == nullptr) {
    return kPD_NULLPTR;
  }
  // dynamic_cast distinguishes a CPU sparse matrix from other Matrix kinds.
  if (auto sparseMat = dynamic_cast<paddle::CpuSparseMatrix*>(ptr->mat.get())) {
    std::vector<int> row(rowSize);
    row.assign(rowArray, rowArray + rowSize);
    std::vector<int> col(colSize);
    col.assign(colArray, colArray + colSize);
    std::vector<paddle_real> val(valueSize);
    if (valueSize) {
      val.assign(valueArray, valueArray + valueSize);
    }
    sparseMat->copyFrom(row, col, val);
    return kPD_NO_ERROR;
  } else {
    return kPD_NOT_SUPPORTED;
  }
#else
  return kPD_NOT_SUPPORTED;
#endif
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "capi_private.h"
#include "vector.h"
using paddle::capi::cast;
extern "C" {
// Create an empty wrapper whose underlying integer vector is still unset.
paddle_ivector paddle_ivector_create_none() {
  auto* wrapper = new paddle::capi::CIVector();
  return wrapper;
}
// Wrap `array` (size elements) in an integer vector handle.  With
// copy == true the data is duplicated into newly allocated (CPU or GPU)
// storage; otherwise the vector aliases the caller's buffer, which must
// outlive the handle.
paddle_ivector paddle_ivector_create(int* array,
                                     uint64_t size,
                                     bool copy,
                                     bool useGPU) {
  auto* wrapper = new paddle::capi::CIVector();
  if (!copy) {
    wrapper->vec = paddle::IVector::create(array, size, useGPU);
  } else {
    wrapper->vec = paddle::IVector::create(size, useGPU);
    wrapper->vec->copyFrom(array, size);
  }
  return wrapper;
}
// Release an integer-vector handle; the wrapped IVectorPtr frees the data.
paddle_error paddle_ivector_destroy(paddle_ivector ivec) {
  if (ivec == nullptr) return kPD_NULLPTR;
  auto* wrapper = cast<paddle::capi::CIVector>(ivec);
  delete wrapper;
  return kPD_NO_ERROR;
}
// Expose the vector's raw data pointer (no copy).  The pointer stays valid
// only while the underlying vector is alive and unresized.
paddle_error paddle_ivector_get(paddle_ivector ivec, int** buffer) {
  if (ivec == nullptr || buffer == nullptr) return kPD_NULLPTR;
  auto* wrapper = cast<paddle::capi::CIVector>(ivec);
  if (wrapper->vec == nullptr) return kPD_NULLPTR;
  *buffer = wrapper->vec->getData();
  return kPD_NO_ERROR;
}
// Resize the wrapped integer vector to `size` elements.
paddle_error paddle_ivector_resize(paddle_ivector ivec, uint64_t size) {
  if (ivec == nullptr) return kPD_NULLPTR;
  auto* wrapper = cast<paddle::capi::CIVector>(ivec);
  if (wrapper->vec == nullptr) return kPD_NULLPTR;
  wrapper->vec->resize(size);
  return kPD_NO_ERROR;
}
// Report the number of elements in the wrapped integer vector.
paddle_error paddle_ivector_get_size(paddle_ivector ivec, uint64_t* size) {
  // BUGFIX: also reject a null output pointer; previously a nullptr `size`
  // was dereferenced.  Mirrors paddle_ivector_get's check on `buffer`.
  if (ivec == nullptr || size == nullptr) return kPD_NULLPTR;
  auto v = cast<paddle::capi::CIVector>(ivec);
  if (v->vec == nullptr) return kPD_NULLPTR;
  *size = v->vec->getSize();
  return kPD_NO_ERROR;
}
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef __PADDLE_CAPI_ARGUMENTS_H__
#define __PADDLE_CAPI_ARGUMENTS_H__
#include <stdint.h>
#include "config.h"
#include "error.h"
#include "matrix.h"
#include "vector.h"
/**
 * Arguments functions. Each argument represents one layer output; an
 * `Arguments` object is an array of such arguments.
 */
typedef void* paddle_arguments;
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief paddle_arguments_create_none Create an array of arguments whose
 * size is zero.
 * @return Arguments
 */
PD_API paddle_arguments paddle_arguments_create_none();
/**
* @brief paddle_arguments_destroy Destroy the arguments
* @param args arguments to destroy
* @return paddle_error
*/
PD_API paddle_error paddle_arguments_destroy(paddle_arguments args);
/**
* @brief paddle_arguments_get_size Get size of arguments array
* @param [in] args arguments array
* @param [out] size array size
* @return paddle_error
*/
PD_API paddle_error paddle_arguments_get_size(paddle_arguments args,
uint64_t* size);
/**
 * @brief PDArgsResize Resize an arguments array.
 * @param args arguments array.
 * @param size target size of the array
 * @return paddle_error
 */
PD_API paddle_error paddle_arguments_resize(paddle_arguments args,
uint64_t size);
/**
* @brief PDArgsSetValue Set value matrix of one argument in array, which index
* is `ID`.
* @param args arguments array
* @param ID array index
* @param mat matrix pointer
* @return paddle_error
*/
PD_API paddle_error paddle_arguments_set_value(paddle_arguments args,
uint64_t ID,
paddle_matrix mat);
/**
* @brief PDArgsGetValue Get value matrix of one argument in array, which index
* is `ID`.
* @param [in] args arguments array
* @param [in] ID array index
* @param [out] mat matrix pointer
* @return paddle_error
*/
PD_API paddle_error paddle_arguments_get_value(paddle_arguments args,
uint64_t ID,
paddle_matrix mat);
/**
* @brief paddle_arguments_get_prob Get the prob matrix of beam search, which
* slot ID is `ID`
* @param [in] args arguments array
* @param [in] ID array index
* @param [out] mat matrix pointer
* @return paddle_error
*/
PD_API paddle_error paddle_arguments_get_prob(paddle_arguments args,
uint64_t ID,
paddle_matrix mat);
/**
* @brief PDArgsGetIds Get the integer vector of one argument in array, which
* index is `ID`.
* @param args arguments array
* @param ID array index
* @param ids integer vector pointer
* @return paddle_error
*/
PD_API paddle_error paddle_arguments_get_ids(paddle_arguments args,
uint64_t ID,
paddle_ivector ids);
/**
* @brief PDArgsSetIds Set the integer vector of one argument in array, which
* index is `ID`.
* @param [in] args arguments array
* @param [in] ID array index
* @param [out] ids integer vector pointer
* @return paddle_error
*/
PD_API paddle_error paddle_arguments_set_ids(paddle_arguments args,
uint64_t ID,
paddle_ivector ids);
/**
 * @brief paddle_arguments_set_frame_shape Set the frame size of one argument
 * in the array, whose index is `ID`.
 * @param [in] args arguments array
 * @param [in] ID array index
 * @param [in] frameHeight maximum height of input images
 * @param [in] frameWidth maximum width of input images
 * @return paddle_error
 */
PD_API paddle_error paddle_arguments_set_frame_shape(paddle_arguments args,
uint64_t ID,
uint64_t frameHeight,
uint64_t frameWidth);
/**
* @brief PDArgsSetSequenceStartPos Set sequence start position vector of one
* argument in array, which index is `ID`.
* @param args arguments array
* @param ID array index
* @param seqPos sequence position array.
* @return paddle_error
*/
PD_API paddle_error
paddle_arguments_set_sequence_start_pos(paddle_arguments args,
uint64_t ID,
uint32_t nestedLevel,
paddle_ivector seqPos);
/**
* @brief PDArgsGetSequenceStartPos Get sequence start position vector of one
* argument in array, which index is `ID`.
* @param [in] args arguments array
* @param [in] ID array index
* @param [out] seqPos sequence position array
* @return paddle_error
*/
PD_API paddle_error
paddle_arguments_get_sequence_start_pos(paddle_arguments args,
uint64_t ID,
uint32_t nestedLevel,
paddle_ivector seqPos);
#ifdef __cplusplus
}
#endif
#endif
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef __PADDLE_CAPI_H__
#define __PADDLE_CAPI_H__
/**
 * Paddle C API. It will replace SWIG as the multi-language API for model
 * training & inference. Currently it is only used for model inference.
 *
 * NOTE: This is an experimental API, it could be changed.
 */
#include "arguments.h"
#include "config.h"
#include "error.h"
#include "gradient_machine.h"
#include "main.h"
#include "matrix.h"
#include "vector.h"
#endif  // __PADDLE_CAPI_H__
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "capi.h"
#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
#include "paddle/legacy/math/Matrix.h"
#include "paddle/legacy/math/Vector.h"
#include "paddle/legacy/parameter/Argument.h"
#pragma once
namespace paddle {
namespace capi {
// Runtime tag stored as the first member of every C-API wrapper struct so
// an opaque void* handle can be identified before casting.
enum CType { kIVECTOR = 0, kMATRIX, kARGUMENTS, kGRADIENT_MACHINE };
#define STRUCT_HEADER CType type;
// Common prefix shared by all wrapper structs: a handle may be viewed as a
// CHeader to read its type tag.
struct CHeader {
  STRUCT_HEADER
};
// Wrapper behind a paddle_ivector handle.
struct CIVector {
  STRUCT_HEADER
  IVectorPtr vec;
  CIVector() : type(kIVECTOR) {}
};
// Wrapper behind a paddle_matrix handle.
struct CMatrix {
  STRUCT_HEADER
  MatrixPtr mat;
  CMatrix() : type(kMATRIX) {}
};
// Wrapper behind a paddle_arguments handle: an array of layer arguments.
struct CArguments {
  STRUCT_HEADER
  std::vector<paddle::Argument> args;
  CArguments() : type(kARGUMENTS) {}
  // Invoke `callback` with the sequence-start-position vector of argument
  // `ID` at the given nesting level (0 = sequence, 1 = sub-sequence).
  // Returns kPD_OUT_OF_RANGE for a bad ID or an unsupported level.
  template <typename T>
  paddle_error accessSeqPos(uint64_t ID, uint32_t nestedLevel, T callback) {
    if (ID >= args.size()) return kPD_OUT_OF_RANGE;
    switch (nestedLevel) {
      case 0:
        callback(args[ID].sequenceStartPositions);
        break;
      case 1:
        callback(args[ID].subSequenceStartPositions);
        break;
      default:
        return kPD_OUT_OF_RANGE;
    }
    return kPD_NO_ERROR;
  }
};
// Wrapper behind a paddle_gradient_machine handle.
struct CGradientMachine {
  STRUCT_HEADER
  paddle::GradientMachinePtr machine;
  CGradientMachine() : type(kGRADIENT_MACHINE) {}
};
// Reinterpret an opaque C-API handle as its concrete wrapper type.
// No runtime validation is performed here; callers are expected to pass
// the matching handle kind.
template <typename T>
inline T* cast(void* ptr) {
  return reinterpret_cast<T*>(ptr);
}
}  // namespace capi
}  // namespace paddle
#ifndef __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__
#define __PADDLE_PADDLE_CAPI_CONFIG_H_INCLUDED__
// Configure-time template: the @...@ placeholders below are substituted by
// CMake (configure_file) when generating the installed config.h.
typedef @PADDLE_FLOAT_TYPE@ paddle_real;
#define __PADDLE_VERSION__ "@PADDLE_VERSION@"
#define __PADDLE_COMMIT__ "@PADDLE_GIT_COMMIT@"
// Since we only support linux and macos in compile, always use clang or
// gcc 4.8+. DLL_IMPORT/DLL_EXPORT is as simple as below.
#define PD_API __attribute__((visibility("default")))
#endif
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册