Commit aeb74af5 authored by Xin Pan

allow operator to run imperatively

Parent b1f6fda5
@@ -58,15 +58,7 @@ LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name,
 LoDTensor& GetVariableTensor(const Scope& scope, const std::string& var_name) {
   Variable* var = scope.FindVar(var_name);
   PADDLE_ENFORCE(var, "%s no in scope", var_name);
-  // TODO(panyx0718): hack, remove it once we run operator.
-  LoDTensor* tensor = var->GetMutable<LoDTensor>();
-  int numel = 10;
-  float* data =
-      tensor->mutable_data<float>(framework::make_ddim({numel}),
-                                  platform::CPUPlace(), sizeof(float) * numel);
-  for (size_t i = 0; i < numel; ++i) data[i] = 1;
-  PADDLE_ENFORCE(var->IsType<LoDTensor>(), "Variable is not LoDTensor");
+  PADDLE_ENFORCE(var->IsType<LoDTensor>(), "Only support lod tensor now.");
   return *var->GetMutable<LoDTensor>();
 }
......
@@ -38,9 +38,8 @@ void CheckProgram(const ProgramDesc &program) {
     switch (role_id) {
       case _INT(OpRole::kForward):
         if (visit.find(_INT(OpRole::kBackward)) != visit.end()) {
-          LOG(ERROR)
-              << "Cannot add backward operator before forward operator %s."
-              << op->Type();
+          LOG(ERROR) << "Cannot add backward operator before forward operator "
+                     << op->Type();
         }
         break;
       case _INT(OpRole::kBackward):
......
@@ -14,8 +14,12 @@
 #pragma once

+#include <string>
 #include <vector>

 #include "paddle/fluid/framework/op_desc.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/imperative/engine.h"

 namespace paddle {
@@ -26,10 +30,47 @@ class Tracer {
   Tracer() {}

   void Trace(framework::OpDesc* op_desc) {
-    LOG(ERROR) << "tracing " << op_desc->Type();
+    LOG(ERROR) << "tracer tracing " << op_desc->Type();
+    op_desc->InferShape(*block_);
+    op_desc->InferVarType(block_);
+    std::unique_ptr<framework::OperatorBase> op =
+        framework::OpRegistry::CreateOp(*op_desc);
+    for (const std::string& vname : op_desc->InputArgumentNames()) {
+      framework::Variable* var = scope_->Var(vname);
+      if (!var->IsInitialized()) {
+        framework::VarDesc* var_desc = block_->FindVar(vname);
+        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+          var->GetMutable<framework::LoDTensor>();
+        } else {
+          LOG(ERROR) << "tracer doesn't support yet";
+        }
+      }
+    }
+    for (const std::string& vname : op_desc->OutputArgumentNames()) {
+      framework::Variable* var = scope_->Var(vname);
+      if (!var->IsInitialized()) {
+        framework::VarDesc* var_desc = block_->FindVar(vname);
+        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+          var->GetMutable<framework::LoDTensor>();
+        } else {
+          LOG(ERROR) << "tracer doesn't support yet";
+        }
+      }
+    }
+    op->Run(*scope_, platform::CPUPlace());
   }

+  void SetScope(framework::Scope* scope) { scope_ = scope; }
+  void SetBlock(framework::BlockDesc* block) { block_ = block; }
+
+  framework::Scope* Scope() const { return scope_; }
+  framework::BlockDesc* Block() const { return block_; }
+
  private:
+  framework::BlockDesc* block_;
+  framework::Scope* scope_;
   std::vector<Runnable*> runnables_;
 };
......
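With this change, Trace stops being a logging stub: it runs InferShape and InferVarType against the attached BlockDesc, instantiates the operator from its OpDesc through OpRegistry::CreateOp, lazily creates any uninitialized input or output variable as a LoDTensor in the attached Scope, and finally calls op->Run on CPUPlace. A minimal sketch of exercising this path from Python, assuming the core.Tracer bindings added later in this commit and BlockDesc's op()/op_size() accessors:

    import paddle.fluid as fluid
    from paddle.fluid import core

    block = fluid.default_main_program().current_block()

    # Append one op desc the usual declarative way; nothing runs yet.
    x = fluid.layers.fill_constant(shape=[3], dtype='float32', value=1.0)

    scope = core.Scope()
    tracer = core.Tracer()     # assumes BindTracer registers the class on core
    tracer.scope = scope       # Scope that receives eagerly-created LoDTensors
    tracer.block = block.desc  # BlockDesc used for InferShape/InferVarType

    # Run the op that fill_constant just appended, imperatively on CPUPlace.
    op_desc = block.desc.op(block.desc.op_size() - 1)
    tracer.trace(op_desc)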
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/pybind/imperative.h"
+#include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/imperative/tracer.h"

 namespace paddle {
@@ -22,7 +24,19 @@ namespace pybind {
 void BindTracer(pybind11::module *m) {
   pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
       .def(pybind11::init<>())
-      .def("trace", &imperative::Tracer::Trace);
+      .def("trace", &imperative::Tracer::Trace)
+      .def_property("scope",
+                    [](const imperative::Tracer &self) { return self.Scope(); },
+                    [](imperative::Tracer &self, framework::Scope *scope) {
+                      self.SetScope(scope);
+                    },
+                    R"DOC()DOC")
+      .def_property("block",
+                    [](const imperative::Tracer &self) { return self.Block(); },
+                    [](imperative::Tracer &self, framework::BlockDesc *block) {
+                      self.SetBlock(block);
+                    },
+                    R"DOC()DOC");
 }

 }  // namespace pybind
......
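Exposing scope and block as writable properties, rather than fixing them at construction time, is what lets the Python side swap a per-layer Scope and BlockDesc in and out around each call; the trace_scope context manager in the layers.py hunk below relies on exactly these setters.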
@@ -159,6 +159,7 @@ PYBIND11_MODULE(core, m) {
              self.mutable_data<float>(place);
            })
       .def("set", PyCPUTensorSetFromArray<float>)
+      .def("set_float", PyCPUTensorSetFromArray<float>)
       .def("set", PyCPUTensorSetFromArray<int>)
       .def("set", PyCPUTensorSetFromArray<double>)
       .def("set", PyCPUTensorSetFromArray<int64_t>)
......
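The overloaded set entries above resolve against the incoming numpy array's dtype; set_float pins the destination element type to float32 explicitly, and is what PyLayer.__call__ below uses when copying an ndarray into a freshly created scope variable. A small usage sketch (the variable name "x" is illustrative):

    import numpy as np
    import paddle.fluid.core as core

    scope = core.Scope()
    tensor = scope.var("x").get_tensor()  # "x" is just an example name

    arr = np.array([1.0, 2.0, -1.0], dtype=np.float32)
    # Copy the array in as float32 without relying on overload resolution
    # over the array's dtype.
    tensor.set_float(arr, core.CPUPlace())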
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import contextlib
 import sys
 import numpy as np

@@ -21,33 +22,46 @@ from paddle.fluid import framework

 __all__ = ['PyLayer']


+@contextlib.contextmanager
+def trace_scope(scope, block):
+    tmp_scope = framework._imperative_tracer().scope
+    tmp_block = framework._imperative_tracer().block
+    framework._imperative_tracer().scope = scope
+    framework._imperative_tracer().block = block
+    yield
+    framework._imperative_tracer().scope = tmp_scope
+    framework._imperative_tracer().block = tmp_block
+
+
 class PyLayer(core.Layer):
     def __init__(self):
         self._scope = core.Scope()
+        self._block = framework.default_main_program().current_block()

     def __call__(self, inputs):
-        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
-            inputs = [inputs]
-
-        var_inputs = []
-        for x in inputs:
-            if isinstance(x, np.ndarray):
-                tensor = core.LoDTensor()
-                tensor.set(x, core.CPUPlace())
-                x = framework.Variable(
-                    framework.default_main_program().current_block(),
-                    type=core.VarDesc.VarType.LOD_TENSOR,
-                    name=None,
-                    shape=x.shape,
-                    dtype=x.dtype)
-            elif not isinstance(x, framework.Variable):
-                raise ValueError("not var or ndarray %s" % type(x))
-            self._scope.var(x.name)
-            var_inputs.append(x)
-        outputs = self.forward(var_inputs)
-        for out in outputs:
-            self._scope.var(out.name)
-        return outputs
+        with trace_scope(self._scope, self._block.desc):
+            if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+                inputs = [inputs]
+
+            var_inputs = []
+            for x in inputs:
+                if isinstance(x, np.ndarray):
+                    py_var = framework.Variable(
+                        self._block,
+                        type=core.VarDesc.VarType.LOD_TENSOR,
+                        name=None,
+                        shape=x.shape,
+                        dtype=x.dtype)
+                    var = self._scope.var(py_var.name)
+                    tensor = var.get_tensor()
+                    tensor.set_float(x, core.CPUPlace())
+                    var_inputs.append(py_var)
+                elif isinstance(x, framework.Variable):
+                    var_inputs.append(x)
+                else:
+                    raise ValueError("not var or ndarray %s" % type(x))
+            outputs = self.forward(var_inputs)
+            return outputs

     def forward(self, inputs):
         print("at python.")
......
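Putting the pieces together: a subclass in the spirit of the MyLayer used by the test below defines forward over a list of Variables and returns a list; inside fluid.imperative.guard(), the fluid.layers calls it makes are handed to the tracer and executed eagerly. A hypothetical sketch (the class name and the particular ops are illustrative, and it assumes layer calls are routed to the tracer while the guard is active):

    import numpy as np
    import paddle.fluid as fluid

    class SquaredRelu(fluid.imperative.PyLayer):
        # Hypothetical layer: relu, then square, all traced eagerly.
        def forward(self, inputs):
            x = fluid.layers.relu(inputs[0])
            return [fluid.layers.elementwise_mul(x, x)]

    with fluid.imperative.guard():
        layer = SquaredRelu()
        out = layer(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
        # numpy(scope=...) pulls the eagerly computed LoDTensor back out,
        # as the updated test does.
        print(out.numpy(scope=layer._scope))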
@@ -17,6 +17,7 @@ All layers just related to the neural network.

 from __future__ import print_function

+import sys
 import numpy as np
 import os
 from ..layer_helper import LayerHelper
......
@@ -31,25 +31,18 @@ class MyLayer(fluid.imperative.PyLayer):

 class TestImperative(unittest.TestCase):
     def test_layer(self):
-        cl = core.Layer()
-        cl.forward([])
-        l = fluid.imperative.PyLayer()
-        l.forward([])
-
-    def test_imperative_trace(self):
         with fluid.imperative.guard():
-            self.assertTrue(fluid.imperative.enabled())
-            x = fluid.layers.data(name='abc', shape=[3, 4], dtype='float32')
-            for _ in xrange(2):
-                x = fluid.layers.relu(x)
-                x = fluid.layers.elementwise_mul(x, x)
-            self.assertIsNotNone(x)
+            cl = core.Layer()
+            cl.forward([])
+            l = fluid.imperative.PyLayer()
+            l.forward([])

     def test_layer_in_out(self):
-        l = MyLayer()
-        x = l(np.ones([1], np.float32))[0]
-        self.assertIsNotNone(x)
-        sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))
+        with fluid.imperative.guard():
+            l = MyLayer()
+            x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
+            self.assertIsNotNone(x)
+            sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))

 if __name__ == '__main__':
......