Unverified commit 44def66a, authored by Jiabin Yang, committed by GitHub

Remove cpp layer (#37730)

* optimize __call__ to make dygraph faster

* fix return type

* remove cpp Layer
Parent 0adc2006
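In short: the pybind-exported core.Layer base class is deleted. The C++ imperative::Layer existed only to be bound into Python, and its default Forward() returned an empty vector; after this commit the Python-side Layer is a plain Python class and every isinstance check targets it directly. A minimal sketch of the user-visible contract after the change (assuming a Paddle build that contains this commit):

    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        l = fluid.Layer("l")   # now a pure-Python base class
        try:
            l.forward([])      # the base forward() is not implemented
        except NotImplementedError:
            print("forward() must be overridden in a subclass")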
@@ -281,16 +281,6 @@ class VarBase {
   static ThreadSafeNameSet name_set_;
 };
 
-class Layer {
- public:
-  virtual ~Layer() {}
-
-  virtual std::vector<std::shared_ptr<VarBase>> Forward(
-      const std::vector<std::shared_ptr<VarBase>>& inputs) {
-    return {};
-  }
-};
-
 std::shared_ptr<GradOpNode> CreateGradOpNode(
     const framework::OperatorBase& op, const NameVarBaseMap& ins,
     const NameVarBaseMap& outs, const framework::AttributeMap& attrs,
...
@@ -59,18 +59,6 @@ PyTypeObject *g_varbase_pytype = nullptr;
 
 namespace py = ::pybind11;
 
-class Layer : public imperative::Layer {
- public:
-  using imperative::Layer::Layer;  // Inherit constructors
-
-  std::vector<std::shared_ptr<imperative::VarBase>> Forward(
-      const std::vector<std::shared_ptr<imperative::VarBase>> &inputs)
-      override {
-    PYBIND11_OVERLOAD(std::vector<std::shared_ptr<imperative::VarBase>>, Layer,
-                      Forward, inputs);  // NOLINT
-  }
-};
-
 template <typename T>
 static T PyObjectCast(PyObject *obj) {
   try {
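The class deleted above is a pybind11 trampoline: PYBIND11_OVERLOAD checks for a Python-side override of forward and routes the virtual call to it, falling back to the C++ default otherwise. Concretely, it is what made the following pattern possible; this is an illustrative sketch of the removed capability, not code that runs after this commit:

    import paddle.fluid.core as core

    class PyDefinedLayer(core.Layer):   # subclassing the pybind-exported base
        def forward(self, inputs):
            # the trampoline routed C++ Forward() calls to this override
            return inputs

    cl = PyDefinedLayer()
    print(cl.forward([]))               # []; a bare core.Layer().forward([])
                                        # fell back to the C++ no-op default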
@@ -2051,18 +2039,6 @@ void BindImperative(py::module *m_ptr) {
       .def_property_readonly("type", &imperative::VarBase::Type)
       .def_property_readonly("dtype", &imperative::VarBase::DataType);
 
-  // NOTE(zhiqiu): set the metaclass of Layer.
-  // See details: https://github.com/pybind/pybind11/pull/679
-  // https://github.com/pybind/pybind11/blob/028812ae7eee307dca5f8f69d467af7b92cc41c8/tests/test_methods_and_attributes.cpp#L284
-  py::class_<imperative::Layer, Layer /* <--- trampoline*/> layer(
-      m, "Layer", py::metaclass((PyObject *)&PyType_Type));  // NOLINT
-  layer.def(py::init<>())
-      .def("forward",
-           [](imperative::Layer &self,
-              const std::vector<std::shared_ptr<imperative::VarBase>> &inputs) {
-             return self.Forward(inputs);
-           });
-
   py::class_<imperative::jit::ProgramDescTracer>(m, "ProgramDescTracer", "")
       .def("create_program_desc",
            &imperative::jit::ProgramDescTracer::CreateProgramDesc)
...
@@ -78,7 +78,7 @@ class HookRemoveHelper(object):
         del hooks[self._hook_id]
 
-class Layer(core.Layer):
+class Layer(object):
     """
     Dynamic graph Layer based on OOD, includes the parameters of the layer, the structure of the forward graph and so on.
@@ -976,7 +976,7 @@ class Layer(core.Layer):
                 for prefix, layer in model.named_sublayers():
                     print(prefix, layer)
         """
-        assert (isinstance(sublayer, core.Layer) or sublayer == None)
+        assert (isinstance(sublayer, Layer) or sublayer == None)
         self._sub_layers[name] = sublayer
         return sublayer
@@ -1143,7 +1143,7 @@ class Layer(core.Layer):
                 params[name] = None
             else:
                 layers = self.__dict__.get('_sub_layers', None)
-                if isinstance(value, core.Layer):
+                if isinstance(value, Layer):
                     if layers is None:
                         raise ValueError(
                             "super(YourLayer, self).__init__() should be called first"
...
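Since user code subclasses the Python Layer rather than core.Layer, dropping the C++ base is transparent to models. A small sketch of a typical subclass, assuming the public paddle.nn API (where nn.Layer is this dygraph Layer; the layer sizes are arbitrary):

    import paddle

    class MyLayer(paddle.nn.Layer):
        def __init__(self):
            super(MyLayer, self).__init__()   # must run before assigning sublayers
            self._fc = paddle.nn.Linear(4, 2)

        def forward(self, x):
            return self._fc(x)

    m = MyLayer()
    y = m(paddle.randn([3, 4]))   # __call__ dispatches to forward()
    print(y.shape)                # [3, 2]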
@@ -400,8 +400,6 @@ class TestImperative(unittest.TestCase):
     def test_layer(self):
         with fluid.dygraph.guard():
-            cl = core.Layer()
-            cl.forward([])
             l = fluid.Layer("l")
             self.assertRaises(NotImplementedError, l.forward, [])
...
@@ -253,7 +253,7 @@ def _pickle_save(obj, f, protocol):
                 dispatch_table_layer[layer.__class__] = reduce_Layer
             return layer
 
-    _parse_every_object(obj, lambda v: isinstance(v, core.Layer),
+    _parse_every_object(obj, lambda v: isinstance(v, fluid.Layer),
                         create_layer_dispatch_table)
 
     def add_dispatch_table():
@@ -316,7 +316,7 @@ def _is_state_dict(obj):
     if isinstance(obj, dict):
 
         def condition(obj):
-            return isinstance(obj, (core.Layer, Program, core.VarBase,
+            return isinstance(obj, (fluid.Layer, Program, core.VarBase,
                                     core.LoDTensor, core.SelectedRows))
 
         # If the value of a dict is a core.VarBase/LoDTensor or a dict
@@ -422,7 +422,7 @@ def _parse_every_object(obj, condition_func, convert_func):
 def _parse_load_result(obj, return_numpy):
     def is_layer(obj):
-        return isinstance(obj, core.Layer)
+        return isinstance(obj, fluid.Layer)
 
     def parse_layer(obj):
         temp_dict = _parse_load_result(obj.__dict__, False)
...
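The io.py edits only retarget the isinstance checks from core.Layer to fluid.Layer; the traversal they feed is unchanged. Roughly, _parse_every_object walks a nested container and converts every value matching a condition, along these lines (a simplified sketch, not Paddle's exact implementation):

    def parse_every_object(obj, condition_func, convert_func):
        if condition_func(obj):
            return convert_func(obj)
        if isinstance(obj, dict):
            return {k: parse_every_object(v, condition_func, convert_func)
                    for k, v in obj.items()}
        if isinstance(obj, (list, tuple)):
            return type(obj)(parse_every_object(v, condition_func, convert_func)
                             for v in obj)
        return obj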