Commit fd3be087 authored by qiaolongfei

restore recurrent_group in v1

Parent 73af1942
@@ -3115,9 +3115,7 @@ def recurrent_group(step,
                     reverse=False,
                     name=None,
                     targetInlink=None,
-                    is_generating=False,
-                    in_args_converter=None,
-                    boot_layer=None):
+                    is_generating=False):
     """
     Recurrent layer group is an extremely flexible recurrent unit in
     PaddlePaddle. As long as the user defines the calculation done within a
@@ -3260,13 +3258,7 @@
     assert (is_generating != has_LayerOutput)
-    if in_args_converter is None:
-        layer_outs = step(*in_args)
-    else:
-        # append boot_layer to the last of in_args
-        if boot_layer is not None:
-            in_args.append(boot_layer)
-        layer_outs = step(*in_args_converter(*in_args)).to_proto(dict())
+    layer_outs = step(*in_args)
     if isinstance(layer_outs, LayerOutput):
         layer_outs = [layer_outs]
......
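For context, here is a minimal sketch of how recurrent_group is called after this restore. It assumes the classic v1 trainer_config_helpers API; the layer names and sizes (word_vector, rnn_state, 128) are illustrative, not taken from this commit.

```python
# Minimal sketch, assuming the v1 trainer_config_helpers API.
# Names and sizes below are illustrative.
from paddle.trainer_config_helpers import *

def step(input_layer):
    # memory() with the same name as a layer in the group reads that
    # layer's output from the previous time step.
    state = memory(name="rnn_state", size=128)
    return fc_layer(input=[input_layer, state],
                    size=128,
                    act=TanhActivation(),
                    name="rnn_state")

seq = data_layer(name="word_vector", size=128)
# Only the original v1 arguments remain after this commit;
# in_args_converter and boot_layer are gone.
rnn = recurrent_group(step=step, input=seq)
```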
@@ -131,11 +131,6 @@ class Layer(object):
         if self.context_name() is None:
             return self.to_proto_impl(**kwargs)
-        elif isinstance(self, MemoryV2):
-            name = self.name + "#__memory__"
-            if name not in context:
-                context[name] = self.to_proto_impl(**kwargs)
-            return context[name]
         elif self.context_name() not in context:
             context[self.context_name()] = self.to_proto_impl(**kwargs)
         return context[self.name]
......
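After this change, Layer.to_proto in the v2 API memoizes every layer, memories included, under its plain context_name(), with no special-cased "#__memory__" key. A condensed sketch of the remaining logic, reconstructed from the context lines above (the __init__ and to_proto_impl stubs are placeholders, not the real implementation):

```python
class Layer(object):
    def __init__(self, name):
        self.name = name  # placeholder; the real class carries more state

    def context_name(self):
        # Layers sharing a context_name() share one protobuf config.
        return self.name

    def to_proto_impl(self, **kwargs):
        raise NotImplementedError  # builds this layer's protobuf config

    def to_proto(self, context, **kwargs):
        # Memoize in `context` so each named layer is converted once;
        # the removed MemoryV2 branch no longer gets a separate cache key.
        if self.context_name() is None:
            return self.to_proto_impl(**kwargs)
        elif self.context_name() not in context:
            context[self.context_name()] = self.to_proto_impl(**kwargs)
        return context[self.name]
```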