// GradientMachine.cpp -- C-API wrappers around paddle::GradientMachine.
#include "PaddleCAPI.h"
#include "PaddleCAPIPrivate.h"
#include "paddle/gserver/gradientmachines/NeuralNetwork.h"

#define cast(v) paddle::capi::cast<paddle::capi::CGradientMachine>(v)

// Creation modes passed as the second argument of
// paddle::GradientMachine::create().
// NOTE(review): these values must stay in sync with Paddle's internal
// create-mode constants (testing mode is 4 there) -- confirm against the
// GradientMachine header before changing. (The "Matchine" typo is kept:
// the enum name may be referenced elsewhere.)
enum GradientMatchineCreateMode {
  CREATE_MODE_NORMAL = 0,   // default/training creation mode
  CREATE_MODE_TESTING = 4   // inference-only mode; used by this C API
};

namespace paddle {

// Thin subclass whose only purpose is to make NeuralNetwork's
// (name, root-network) constructor reachable from this translation unit.
// NOTE(review): presumably that base constructor is protected -- confirm
// against the NeuralNetwork header. No behavior is added here.
class MyNeuralNetwork : public NeuralNetwork {
public:
  MyNeuralNetwork(const std::string& name, NeuralNetwork* network)
      : NeuralNetwork(name, network) {}
};

// Factory for the wrapper above. The caller takes ownership of the
// returned raw pointer. (The "Nerual" typo is kept: this symbol may be
// referenced from other files.)
NeuralNetwork* newCustomNerualNetwork(const std::string& name,
                                      NeuralNetwork* network) {
  return new MyNeuralNetwork(name, network);
}
}

extern "C" {
// Creates an inference-only gradient machine from a serialized
// paddle::ModelConfig protobuf.
//
// machine             - out-param; on success receives the new handle, which
//                       must be released with PDGradientMachineDestroy.
// modelConfigProtobuf - serialized ModelConfig bytes.
// size                - byte length of modelConfigProtobuf.
//
// Returns kPD_NULLPTR on null arguments, kPD_PROTOBUF_ERROR when the config
// cannot be parsed or is incomplete, kPD_NO_ERROR on success.
int PDGradientMachineCreateForPredict(PD_GradiemtMachine* machine,
                                      void* modelConfigProtobuf,
                                      int size) {
  // Fix: also reject a null out-pointer -- the original dereferenced
  // `machine` unconditionally below.
  if (machine == nullptr || modelConfigProtobuf == nullptr) return kPD_NULLPTR;
  paddle::ModelConfig config;
  if (!config.ParseFromArray(modelConfigProtobuf, size) ||
      !config.IsInitialized()) {
    return kPD_PROTOBUF_ERROR;
  }

  auto ptr = new paddle::capi::CGradientMachine();
  ptr->machine.reset(paddle::GradientMachine::create(
      config, CREATE_MODE_TESTING, {paddle::PARAMETER_VALUE}));
  *machine = ptr;
  return kPD_NO_ERROR;
}

// Releases a handle created by this C API. Deleting the wrapper destroys
// the owned machine via its smart-pointer member. A null handle is a no-op
// (delete on nullptr is well-defined) and still reports success.
int PDGradientMachineDestroy(PD_GradiemtMachine machine) {
  delete cast(machine);
  return kPD_NO_ERROR;
}

// Loads model parameters from `path` into an existing gradient machine.
// Returns kPD_NULLPTR when the handle, its inner machine, or the path is
// null; kPD_NO_ERROR otherwise.
int PDGradientMachineLoadParameterFromDisk(PD_GradiemtMachine machine,
                                           const char* path) {
  auto gm = cast(machine);
  if (gm == nullptr || gm->machine == nullptr || path == nullptr) {
    return kPD_NULLPTR;
  }
  gm->machine->loadParameters(path);
  return kPD_NO_ERROR;
}

// Runs one forward pass: feeds `inArgs` through the machine and stores the
// results in `outArgs`. `isTrain` selects PASS_TRAIN vs PASS_TEST.
// Returns kPD_NULLPTR on any null handle, kPD_NO_ERROR otherwise.
int PDGradientMachineForward(PD_GradiemtMachine machine,
                             PD_Arguments inArgs,
                             PD_Arguments outArgs,
                             bool isTrain) {
  auto gm = cast(machine);
  auto input = paddle::capi::cast<paddle::capi::CArguments>(inArgs);
  auto output = paddle::capi::cast<paddle::capi::CArguments>(outArgs);
  if (gm == nullptr || gm->machine == nullptr || input == nullptr ||
      output == nullptr) {
    return kPD_NULLPTR;
  }
  const auto passType = isTrain ? paddle::PASS_TRAIN : paddle::PASS_TEST;
  gm->machine->forward(input->args, &output->args, passType);
  return kPD_NO_ERROR;
}

// Creates a second ("slave") machine from `modelConfigProtobuf` whose
// PARAMETER_VALUE buffers alias those of `origin`, so both machines read
// the same weights without copying.
//
// origin              - existing machine whose parameters are shared.
// modelConfigProtobuf - serialized paddle::ModelConfig bytes.
// size                - byte length of modelConfigProtobuf.
// slave               - out-param; on success receives the new handle, which
//                       must be released with PDGradientMachineDestroy.
//
// Returns kPD_NULLPTR on null arguments, kPD_PROTOBUF_ERROR when the config
// cannot be parsed or is incomplete, kPD_NO_ERROR on success.
int PDGradientMachineCreateSharedParam(PD_GradiemtMachine origin,
                                       void* modelConfigProtobuf,
                                       int size,
                                       PD_GradiemtMachine* slave) {
  auto o = cast(origin);
  // Fix: also reject a null config buffer before handing it to protobuf,
  // consistent with PDGradientMachineCreateForPredict.
  if (origin == nullptr || slave == nullptr || modelConfigProtobuf == nullptr ||
      o->machine == nullptr) {
    return kPD_NULLPTR;
  }
  paddle::ModelConfig config;
  if (!config.ParseFromArray(modelConfigProtobuf, size) ||
      !config.IsInitialized()) {
    return kPD_PROTOBUF_ERROR;
  }

  // unique_ptr keeps the wrapper exception-safe until handed to the caller.
  std::unique_ptr<paddle::capi::CGradientMachine> ptr(
      new paddle::capi::CGradientMachine());
  auto nn = paddle::NeuralNetwork::create(config);
  nn->init(config,
           [&o](int paramId, paddle::Parameter* param) {
             // Alias this parameter's VALUE buffer to the master's copy.
             auto p = o->machine->getParameters()[paramId];
             param->enableSharedType(paddle::PARAMETER_VALUE,
                                     p->getBuf(paddle::PARAMETER_VALUE));
           },
           {paddle::PARAMETER_VALUE},
           false);
  ptr->machine.reset(nn);
  *slave = ptr.release();
  return kPD_NO_ERROR;
}
}