// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/py_func_op.h"
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "Python.h"
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

namespace py = ::pybind11;

// Registry of all Python callables. The index into this vector is the
// "callable id" stored in py_func op attributes.
static std::vector<py::object> g_py_callables;

// Attribute names of py_func. The callable ids are indices into
// g_py_callables, as returned by AppendPythonCallableObjectAndReturnId().
const char kForwardPythonCallableId[] = "forward_callable_id";
const char kBackwardPythonCallableId[] = "backward_callable_id";
const char kPyFuncBackwardSkipVars[] = "backward_skip_vars";

// Registers a Python callable and returns its id. The id is what the Python
// side stores as the op's forward/backward callable id attribute.
size_t AppendPythonCallableObjectAndReturnId(const py::object &py_obj) {
  g_py_callables.emplace_back(py_obj);
  return g_py_callables.size() - 1;
}
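
// Rough sketch of the intended registration flow (the Python-side binding
// name below is an assumption; only the C++ entry points in this file are
// authoritative):
//
//   fwd_id = append_python_callable_object_and_return_id(my_forward_func)
//   bwd_id = append_python_callable_object_and_return_id(my_backward_func)
//   op_desc.set_attr("forward_callable_id", fwd_id)
//   op_desc.set_attr("backward_callable_id", bwd_id)  # -1 disables backward
//
// At runtime, PyFuncOp looks the callable up by id and invokes it with the
// op's input tensors.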

static py::object *GetPythonCallableObject(size_t i) {
  PADDLE_ENFORCE_LT(i, g_py_callables.size(), "Invalid python callable id");
  return &g_py_callables[i];
}

std::string PythonObjectToString(const py::object &py_callable) {
  py::gil_scoped_acquire guard;
  return py::str(*py_callable);
}

// Calls the registered Python callable under the GIL. Inputs are marshalled
// into a Python tuple (None for uninitialized tensors); each element of the
// returned tuple is shared with the corresponding output tensor.
void CallPythonFunc(py::object *callable,
                    const std::vector<framework::LoDTensor> &ins,
                    std::vector<framework::LoDTensor *> *out) {
  py::gil_scoped_acquire guard;
  py::tuple in_args(ins.size());
  for (size_t i = 0; i < ins.size(); ++i) {
    in_args[i] = ins[i].IsInitialized() ? py::cast(ins[i]) : py::cast(nullptr);
  }

  auto ret = (*callable)(*in_args);
  auto ret_tuple = py::cast<py::tuple>(ret);
  PADDLE_ENFORCE_EQ(py::len(ret_tuple), out->size(),
                    "Returned tuple size does not match the output number");
  for (size_t i = 0; i < out->size(); ++i) {
    if ((*out)[i] == nullptr) {
      continue;
    }
    try {
      auto *out_tensor = py::cast<framework::LoDTensor *>(ret_tuple[i]);
      PADDLE_ENFORCE_NOT_NULL(out_tensor,
                              "Output tensor %d should not be nullptr", i);
      (*out)[i]->set_lod(out_tensor->lod());
      (*out)[i]->ShareDataWith(*out_tensor);
    } catch (py::cast_error &) {
      PADDLE_THROW("Output %d is not a LoDTensor", i);
    }
  }
}
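
// Expected form of the registered callable (a sketch of the contract that
// CallPythonFunc enforces): it receives one argument per "X" input, each a
// LoDTensor or None when the input is uninitialized, and it must return a
// tuple with one entry per "Out" variable, each a LoDTensor. Entries whose
// output slot is unbound are ignored. A hypothetical example:
//
//   def my_forward_func(x0, x1):
//       ...
//       return out0, out1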

class PyFuncOpShapeInference : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(!ctx->IsRuntime(),
                   "Infer shape cannot be called in runtime.");
    PADDLE_ENFORCE(ctx->HasInputs("X") || ctx->HasOutputs("Out"),
                   "Input(X) or Output(Out) must exist");
    PADDLE_ENFORCE_GE(ctx->Attrs().Get<int>(kForwardPythonCallableId), 0,
                      "Function id cannot be less than 0");

    auto *op = boost::get<const framework::OpDesc *>(ctx->GetOp());
    auto *block = op->Block();
    const std::string kGradVarSuffix = framework::kGradVarSuffix;
    // Traverse all outputs: any output named "<fwd_var>@GRAD" gets its shape,
    // dtype, lod_level and type copied from the forward variable "<fwd_var>".
    auto out_vars = ctx->GetOutputVarPtrs("Out");
    for (auto &out_var : out_vars) {
      auto *out_var_desc = boost::get<framework::VarDesc *>(out_var);
      if (out_var_desc == nullptr) {
        continue;
      }
      auto out_name = out_var_desc->Name();
      if (out_name == framework::kEmptyVarName ||
          out_name.size() <= kGradVarSuffix.size()) {
        continue;
      }

      size_t len = out_name.size() - kGradVarSuffix.size();
      if (out_name.substr(len) == kGradVarSuffix) {
        auto fwd_var_name = out_name.substr(0, len);
        auto *in_var_desc = block->FindVarRecursive(fwd_var_name);
        PADDLE_ENFORCE_NOT_NULL(in_var_desc, "Forward variable %s not found",
                                fwd_var_name);
        VLOG(10) << "Infer shape of Out(" << out_name << ") as Input("
                 << in_var_desc->Name() << ")";
        out_var_desc->SetShape(in_var_desc->GetShape());
        out_var_desc->SetDataType(in_var_desc->GetDataType());
        out_var_desc->SetLoDLevel(in_var_desc->GetLoDLevel());
        out_var_desc->SetType(in_var_desc->GetType());
      }
    }
  }
};

class PyFuncOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Inputs of py_func op.").AsDuplicable();
    AddOutput("Out", "Outputs of py_func op.").AsDuplicable();
    AddAttr<int>(kForwardPythonCallableId,
                 "Index of registered forward Python function.")
        .SetDefault(0);
    AddAttr<int>(kBackwardPythonCallableId,
                 "Index of the registered backward Python function. "
                 "-1 means that the op has no backward function.")
        .SetDefault(-1);
    AddAttr<std::vector<std::string>>(
        kPyFuncBackwardSkipVars,
        "Forward inputs/outputs that are not needed by the backward op "
        "and are therefore skipped from its inputs.")
        .SetDefault(std::vector<std::string>());
    AddComment(R"DOC(
PyFunc operator: calls a Python function registered on the Python side
to compute the outputs from the inputs.
)DOC");
  }
};
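
// The backward of py_func is another py_func op whose forward callable id is
// the registered backward callable id. Roughly:
//   backward inputs  = non-skipped X + non-skipped Out + Out@GRAD
//   backward outputs = X@GRAD
// Variables listed in backward_skip_vars are dropped from the backward
// inputs; output gradients and input gradients are never skipped.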

class PyFuncOpGradDescMaker : public framework::GradOpDescMakerBase {
 public:
  using framework::GradOpDescMakerBase::GradOpDescMakerBase;

  std::vector<std::unique_ptr<framework::OpDesc>> operator()() const override {
    auto &fwd_attrs = Attrs();
    // No backward op is generated when the backward callable id is negative
    if (boost::get<int>(fwd_attrs.at(kBackwardPythonCallableId)) < 0) {
      return {};
    }

    std::unique_ptr<framework::OpDesc> grad_op(new framework::OpDesc());
    grad_op->SetType("py_func");

    framework::AttributeMap bwd_attrs;
    bwd_attrs[kForwardPythonCallableId] =
        fwd_attrs.at(kBackwardPythonCallableId);
    bwd_attrs[kBackwardPythonCallableId] = -1;
    grad_op->SetAttrMap(bwd_attrs);

    // All forward inputs
    auto fwd_ins = Input("X");
    // All forward outputs
    auto fwd_outs = Output("Out");

    // Because of memory reuse, some forward inputs/outputs may not be needed
    // by the backward op. Just skip those variables.
    auto &backward_skip_var_list = boost::get<std::vector<std::string>>(
        fwd_attrs.at(kPyFuncBackwardSkipVars));
    std::unordered_set<std::string> backward_skip_var_set(
        backward_skip_var_list.begin(), backward_skip_var_list.end());
    std::vector<std::string> bwd_ins;
    bwd_ins.reserve(fwd_ins.size() + fwd_outs.size());
    for (auto &fwd_in : fwd_ins) {
      if (backward_skip_var_set.count(fwd_in) == 0) {
        bwd_ins.emplace_back(fwd_in);
      }
    }

    for (auto &fwd_out : fwd_outs) {
      if (backward_skip_var_set.count(fwd_out) == 0) {
        bwd_ins.emplace_back(fwd_out);
      }
    }

    // Output gradients (OG) cannot be skipped.
    // On the Python side, if an OG is kEmptyVarName, its input tensor is None.
    auto fwd_out_grads = OutputGrad("Out");
    bwd_ins.reserve(bwd_ins.size() + fwd_out_grads.size());
    bwd_ins.insert(bwd_ins.end(), fwd_out_grads.begin(), fwd_out_grads.end());

    // Input gradients (IG) cannot be skipped either.
    // On the Python side, users can simply return None for an unneeded IG.
    auto bwd_outs = InputGrad("X", false);

    if (VLOG_IS_ON(10)) {
      std::string in_str = "PyFunc Grad Input: ";
      for (auto &in : bwd_ins) {
        in_str += in;
        in_str += " ";
      }
      VLOG(10) << in_str;

      std::string out_str = "PyFunc Grad Output: ";
      for (auto &out : bwd_outs) {
        out_str += out;
        out_str += " ";
      }
      VLOG(10) << out_str;
    }

    grad_op->SetInput("X", bwd_ins);
    grad_op->SetOutput("Out", bwd_outs);

    std::vector<std::unique_ptr<framework::OpDesc>> ret(1);
    ret[0] = std::move(grad_op);
    return ret;
  }
};
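
// Runtime part of py_func. PyFuncOp is an OperatorBase with no compute
// kernels: GPU inputs are copied to CPU first, the registered forward
// callable is invoked via CallPythonFunc(), and the returned tensors share
// their data with the op's output variables.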

class PyFuncOp : public framework::OperatorBase {
 public:
  using framework::OperatorBase::OperatorBase;

 protected:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &place) const override {
    auto &in_arg_names = Inputs("X");
    auto &out_arg_names = Outputs("Out");

    std::vector<framework::LoDTensor> inputs(in_arg_names.size());
    for (size_t i = 0; i < in_arg_names.size(); ++i) {
      auto in_var = scope.FindVar(in_arg_names[i]);
      if (in_var == nullptr) {
        continue;
      }
      auto &in_tensor = in_var->Get<framework::LoDTensor>();
      if (!in_tensor.IsInitialized()) {
        continue;
      }
      if (platform::is_gpu_place(in_tensor.place())) {
        framework::TensorCopySync(in_tensor, platform::CPUPlace(), &inputs[i]);
      } else {
        inputs[i].ShareDataWith(in_tensor);
      }
      inputs[i].set_lod(in_tensor.lod());
    }

    std::vector<framework::LoDTensor *> outputs(out_arg_names.size());
    for (size_t i = 0; i < out_arg_names.size(); ++i) {
      auto *out_var = scope.FindVar(out_arg_names[i]);
      auto *out_tensor =
          out_var ? out_var->GetMutable<framework::LoDTensor>() : nullptr;
      outputs[i] = out_tensor;
    }

    auto callable_id = static_cast<size_t>(Attr<int>(kForwardPythonCallableId));
    auto *py_callable = GetPythonCallableObject(callable_id);
    VLOG(10) << "Call py_func_op with id " << callable_id << ": "
             << PythonObjectToString(*py_callable);
    CallPythonFunc(py_callable, inputs, &outputs);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(py_func, ops::PyFuncOp, ops::PyFuncOpMaker,
                  ops::PyFuncOpShapeInference, ops::PyFuncOpGradDescMaker);