Commit aeb74af5 authored by Xin Pan

allow operator to run imperatively

Parent b1f6fda5
@@ -58,15 +58,7 @@ LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name,
 LoDTensor& GetVariableTensor(const Scope& scope, const std::string& var_name) {
   Variable* var = scope.FindVar(var_name);
   PADDLE_ENFORCE(var, "%s no in scope", var_name);
-  // TODO(panyx0718): hack, remove it once we run oprerator.
-  LoDTensor* tensor = var->GetMutable<LoDTensor>();
-  int numel = 10;
-  float* data =
-      tensor->mutable_data<float>(framework::make_ddim({numel}),
-                                  platform::CPUPlace(), sizeof(float) * numel);
-  for (size_t i = 0; i < numel; ++i) data[i] = 1;
-  PADDLE_ENFORCE(var->IsType<LoDTensor>(), "Variable is not LoDTensor");
+  PADDLE_ENFORCE(var->IsType<LoDTensor>(), "Only support lod tensor now.");
   return *var->GetMutable<LoDTensor>();
 }
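With the hard-coded stub gone, `GetVariableTensor` now only verifies that the variable holds a `LoDTensor` and returns it; real values come from actually running the traced operator (see the `Tracer` changes below), and the updated test at the bottom reads them back via `x.numpy(scope=l._scope)`.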
@@ -38,8 +38,7 @@ void CheckProgram(const ProgramDesc &program) {
     switch (role_id) {
       case _INT(OpRole::kForward):
         if (visit.find(_INT(OpRole::kBackward)) != visit.end()) {
-          LOG(ERROR)
-              << "Cannot add backward operator before forward operator %s."
+          LOG(ERROR) << "Cannot add backward operator before forward operator "
                      << op->Type();
         }
         break;
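This hunk fixes a logging bug rather than behavior: `LOG(ERROR)` is a glog stream, so the printf-style `%s` placeholder was printed literally instead of being substituted; the message now streams `op->Type()` directly.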
@@ -14,8 +14,12 @@
 #pragma once

 #include <string>
 #include <vector>

 #include "paddle/fluid/framework/op_desc.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/imperative/engine.h"

 namespace paddle {
@@ -26,10 +30,47 @@ class Tracer {
   Tracer() {}

   void Trace(framework::OpDesc* op_desc) {
-    LOG(ERROR) << "tracing " << op_desc->Type();
+    LOG(ERROR) << "tracer tracing " << op_desc->Type();
+    op_desc->InferShape(*block_);
+    op_desc->InferVarType(block_);
+    std::unique_ptr<framework::OperatorBase> op =
+        framework::OpRegistry::CreateOp(*op_desc);
+    for (const std::string& vname : op_desc->InputArgumentNames()) {
+      framework::Variable* var = scope_->Var(vname);
+      if (!var->IsInitialized()) {
+        framework::VarDesc* var_desc = block_->FindVar(vname);
+        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+          var->GetMutable<framework::LoDTensor>();
+        } else {
+          LOG(ERROR) << "tracer doesn't support yet";
+        }
+      }
+    }
+    for (const std::string& vname : op_desc->OutputArgumentNames()) {
+      framework::Variable* var = scope_->Var(vname);
+      if (!var->IsInitialized()) {
+        framework::VarDesc* var_desc = block_->FindVar(vname);
+        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+          var->GetMutable<framework::LoDTensor>();
+        } else {
+          LOG(ERROR) << "tracer doesn't support yet";
+        }
+      }
+    }
+    op->Run(*scope_, platform::CPUPlace());
   }

+  void SetScope(framework::Scope* scope) { scope_ = scope; }
+  void SetBlock(framework::BlockDesc* block) { block_ = block; }
+
+  framework::Scope* Scope() const { return scope_; }
+  framework::BlockDesc* Block() const { return block_; }
+
  private:
+  framework::BlockDesc* block_;
+  framework::Scope* scope_;
   std::vector<Runnable*> runnables_;
 };
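`Trace` now performs the full imperative step: infer shapes and variable types against the current block, instantiate the operator through `OpRegistry`, materialize any uninitialized input/output variables as `LoDTensor`s in the tracer's scope, and run the op on CPU. What the diff does not show is how a freshly built op reaches `Trace`; that hook lives in `framework.py` (reached below through `framework._imperative_tracer()`). A plausible sketch of it, with `_in_imperative_mode` as an assumed helper name:

```python
# Hypothetical sketch of Block.append_op in framework.py (not part of this
# diff): after the op desc is built as usual, hand it to the active tracer,
# which infers shapes/types, creates the operator, and runs it immediately.
def append_op(self, *args, **kwargs):
    op_desc = self.desc.append_op()
    op = Operator(block=self, desc=op_desc, *args, **kwargs)
    if framework._in_imperative_mode():  # assumed helper name
        framework._imperative_tracer().trace(op_desc)
    return op
```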
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/pybind/imperative.h"
+#include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/imperative/tracer.h"

 namespace paddle {
@@ -22,7 +24,19 @@ namespace pybind {
 void BindTracer(pybind11::module *m) {
   pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
       .def(pybind11::init<>())
-      .def("trace", &imperative::Tracer::Trace);
+      .def("trace", &imperative::Tracer::Trace)
+      .def_property("scope",
+                    [](const imperative::Tracer &self) { return self.Scope(); },
+                    [](imperative::Tracer &self, framework::Scope *scope) {
+                      self.SetScope(scope);
+                    },
+                    R"DOC()DOC")
+      .def_property("block",
+                    [](const imperative::Tracer &self) { return self.Block(); },
+                    [](imperative::Tracer &self, framework::BlockDesc *block) {
+                      self.SetBlock(block);
+                    },
+                    R"DOC()DOC");
 }

 }  // namespace pybind
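A minimal sketch of driving these bindings from Python, assuming `Tracer` is exported on the `core` module as `BindTracer` suggests:

```python
import paddle.fluid.core as core

tracer = core.Tracer()  # pybind11::init<>()
scope = core.Scope()
tracer.scope = scope    # property setter -> Tracer::SetScope
# tracer.block expects a BlockDesc, e.g. some_block.desc on the Python side
```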
@@ -159,6 +159,7 @@ PYBIND11_MODULE(core, m) {
            self.mutable_data<float>(place);
          })
       .def("set", PyCPUTensorSetFromArray<float>)
+      .def("set_float", PyCPUTensorSetFromArray<float>)
       .def("set", PyCPUTensorSetFromArray<int>)
       .def("set", PyCPUTensorSetFromArray<double>)
       .def("set", PyCPUTensorSetFromArray<int64_t>)
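`set_float` is an explicitly named alias for the existing float overload of `set`; the new `PyLayer.__call__` below uses it to copy numpy inputs into the layer's scope without relying on pybind11 overload resolution. Minimal usage, assuming the usual tensor setup:

```python
import numpy as np
import paddle.fluid.core as core

t = core.LoDTensor()
t.set_float(np.ones([3], dtype=np.float32), core.CPUPlace())
```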
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

 import contextlib
+import sys
 import numpy as np
@@ -21,32 +22,45 @@ from paddle.fluid import framework
 __all__ = ['PyLayer']


+@contextlib.contextmanager
+def trace_scope(scope, block):
+    tmp_scope = framework._imperative_tracer().scope
+    tmp_block = framework._imperative_tracer().block
+    framework._imperative_tracer().scope = scope
+    framework._imperative_tracer().block = block
+    yield
+    framework._imperative_tracer().scope = tmp_scope
+    framework._imperative_tracer().block = tmp_block
+
+
 class PyLayer(core.Layer):
     def __init__(self):
+        self._scope = core.Scope()
+        self._block = framework.default_main_program().current_block()

     def __call__(self, inputs):
+        with trace_scope(self._scope, self._block.desc):
             if not isinstance(inputs, list) and not isinstance(inputs, tuple):
                 inputs = [inputs]

             var_inputs = []
             for x in inputs:
                 if isinstance(x, np.ndarray):
-                    tensor = core.LoDTensor()
-                    tensor.set(x, core.CPUPlace())
-                    x = framework.Variable(
-                        framework.default_main_program().current_block(),
+                    py_var = framework.Variable(
+                        self._block,
                         type=core.VarDesc.VarType.LOD_TENSOR,
                         name=None,
                         shape=x.shape,
                         dtype=x.dtype)
-                elif not isinstance(x, framework.Variable):
-                    raise ValueError("not var or ndarray %s" % type(x))
-                self._scope.var(x.name)
+                    var = self._scope.var(py_var.name)
+                    tensor = var.get_tensor()
+                    tensor.set_float(x, core.CPUPlace())
+                    var_inputs.append(py_var)
+                elif isinstance(x, framework.Variable):
+                    var_inputs.append(x)
+                else:
+                    raise ValueError("not var or ndarray %s" % type(x))
+            outputs = self.forward(var_inputs)
+            for out in outputs:
+                self._scope.var(out.name)
+            return outputs

     def forward(self, inputs):
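One caveat on `trace_scope` above: if the traced body raises, the tracer is left pointing at the layer's private scope. An exception-safe variant is a small change (a sketch with the same API, not part of this diff):

```python
import contextlib

from paddle.fluid import framework


@contextlib.contextmanager
def trace_scope(scope, block):
    tracer = framework._imperative_tracer()
    tmp_scope, tmp_block = tracer.scope, tracer.block
    tracer.scope, tracer.block = scope, block
    try:
        yield
    finally:  # restore even if the traced body raises
        tracer.scope, tracer.block = tmp_scope, tmp_block
```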
@@ -17,6 +17,7 @@ All layers just related to the neural network.
 from __future__ import print_function

 import sys
 import numpy as np
+import os

 from ..layer_helper import LayerHelper
@@ -31,23 +31,16 @@ class MyLayer(fluid.imperative.PyLayer):
 class TestImperative(unittest.TestCase):
     def test_layer(self):
         with fluid.imperative.guard():
             cl = core.Layer()
             cl.forward([])
             l = fluid.imperative.PyLayer()
             l.forward([])

-    def test_imperative_trace(self):
-        with fluid.imperative.guard():
-            self.assertTrue(fluid.imperative.enabled())
-            x = fluid.layers.data(name='abc', shape=[3, 4], dtype='float32')
-            for _ in xrange(2):
-                x = fluid.layers.relu(x)
-                x = fluid.layers.elementwise_mul(x, x)
-            self.assertIsNotNone(x)
-
     def test_layer_in_out(self):
         with fluid.imperative.guard():
             l = MyLayer()
-            x = l(np.ones([1], np.float32))[0]
+            x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
             self.assertIsNotNone(x)
+            sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))
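`MyLayer`'s body is truncated in this view (only the hunk header above names it). A minimal reconstruction consistent with `test_layer_in_out` (hypothetical, not the diff's exact code):

```python
import paddle.fluid as fluid


class MyLayer(fluid.imperative.PyLayer):
    def __init__(self):
        super(MyLayer, self).__init__()

    def forward(self, inputs):
        # relu then square, so l(np.array([1.0, 2.0, -1.0]))[0] is well-defined
        x = fluid.layers.relu(inputs[0])
        return [fluid.layers.elementwise_mul(x, x)]
```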