diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index c207c5282f3b524e8deab9857b9fe0eddc9b4f55..1409acd5ca0aa3bef7e4c916296b99484929449a 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -2773,6 +2773,9 @@ class OpProtoHolder:
 
         return custom_op_names
 
+    def has_op_proto(self, type):
+        return type in self.op_proto_map
+
     @staticmethod
     def generated_op_attr_names():
         return {
diff --git a/python/paddle/jit/translated_layer.py b/python/paddle/jit/translated_layer.py
index 45563584f166d00128858c10c40b56068f2ada3a..be9e1bee3255894d4c934e1ce6269b45dd5d44cd 100644
--- a/python/paddle/jit/translated_layer.py
+++ b/python/paddle/jit/translated_layer.py
@@ -563,6 +563,11 @@ class _ProgramHolder:
                     op.desc.set_output("ReserveSpace", [reserve_space.name])
                     continue
 
+                # In some cases, users add backward ops inside the forward
+                # function of a Layer. Backward ops have no registered proto,
+                # so skip them here instead of letting get_op_proto() raise.
+                if not OpProtoHolder.instance().has_op_proto(op.type):
+                    continue
                 proto = OpProtoHolder.instance().get_op_proto(op.type)
                 has_create_intermediate_out = False
                 for output_proto in proto.outputs:
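
For context, here is a minimal sketch (not part of the patch) of how the new `has_op_proto` guard composes with the existing `get_op_proto` call. The op types `relu` and `relu_grad` are illustrative assumptions: `relu_grad` stands in for a backward op whose proto is not registered, and the loop mimics the skip-and-continue pattern used in `_ProgramHolder`:

```python
# Illustrative sketch only. "relu_grad" is assumed to be a backward op type
# with no registered proto; forward ops like "relu" do have one.
from paddle.fluid.framework import OpProtoHolder

holder = OpProtoHolder.instance()

for op_type in ["relu", "relu_grad"]:
    if not holder.has_op_proto(op_type):
        # Without this guard, get_op_proto() would raise ValueError for
        # op types that were never registered with a proto.
        print(f"skipping {op_type}: no proto registered")
        continue
    proto = holder.get_op_proto(op_type)
    print(f"{op_type}: proto found with {len(proto.outputs)} output slot(s)")
```

The guard keeps the translation loop tolerant of user programs whose forward pass embeds backward ops, rather than aborting on the first unregistered op type.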