diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index dcb39784a54120b70082b3e0f15e9384b329998e..2b95c2ed0f356619fb39db6fcef62809e5aa0c2f 100755
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -3115,9 +3115,7 @@ def recurrent_group(step,
                     reverse=False,
                     name=None,
                     targetInlink=None,
-                    is_generating=False,
-                    in_args_converter=None,
-                    boot_layer=None):
+                    is_generating=False):
     """
     Recurrent layer group is an extremely flexible recurrent unit in
     PaddlePaddle. As long as the user defines the calculation done within a
@@ -3260,13 +3258,7 @@ def recurrent_group(step,
 
     assert (is_generating != has_LayerOutput)
 
-    if in_args_converter is None:
-        layer_outs = step(*in_args)
-    else:
-        # append boot_layer to the last of in_args
-        if boot_layer is not None:
-            in_args.append(boot_layer)
-        layer_outs = step(*in_args_converter(*in_args)).to_proto(dict())
+    layer_outs = step(*in_args)
 
     if isinstance(layer_outs, LayerOutput):
         layer_outs = [layer_outs]
diff --git a/python/paddle/v2/layer.py b/python/paddle/v2/layer.py
index 71d0e54c0a6f85dcb7634dc03a612aa4e897a170..f1ca0b46ebc42ec6deb14f3767d1dc9ec51cfc7d 100644
--- a/python/paddle/v2/layer.py
+++ b/python/paddle/v2/layer.py
@@ -131,11 +131,6 @@ class Layer(object):
 
         if self.context_name() is None:
             return self.to_proto_impl(**kwargs)
-        elif isinstance(self, MemoryV2):
-            name = self.name + "#__memory__"
-            if name not in context:
-                context[name] = self.to_proto_impl(**kwargs)
-            return context[name]
         elif self.context_name() not in context:
             context[self.context_name()] = self.to_proto_impl(**kwargs)
         return context[self.name]