diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc
index fc60b6302e7ab0ab155e5430ccb8f0a4ba0e94b1..89ebb925363f57d68bb47b9e3b5fd133f8496811 100644
--- a/paddle/fluid/pybind/op_function_generator.cc
+++ b/paddle/fluid/pybind/op_function_generator.cc
@@ -52,6 +52,9 @@ std::map<std::string, std::vector<std::string>> op_ins_map = {
 std::map<std::string, std::vector<std::string>> op_outs_map = {
     {"fake_quantize_dequantize_moving_average_abs_max",
      {"Out", "OutScale", "OutAccum", "OutState"}},
+    {"batch_norm",
+     {"Y", "MeanOut", "VarianceOut", "SavedMean", "SavedVariance",
+      "ReserveSpace"}},
 };
 
 // NOTE(zhiqiu): Commonly, the outputs in auto-generated OP function are
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 7cb4702fedcbac9a3dd8fc7bb941735a1cfbe435..680e47bf4221c6168c337821e0ef1826f200636e 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -1310,9 +1310,10 @@ class BatchNorm(layers.Layer):
                 self._fuse_with_relu, "use_global_stats",
                 self._use_global_stats, 'trainable_statistics',
                 self._trainable_statistics)
-            batch_norm_out, _, _, _, _ = core.ops.batch_norm(
+            batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
                 input, self.weight, self.bias, self._mean, self._variance,
                 mean_out, variance_out, *attrs)
+
             return dygraph_utils._append_activation_in_dygraph(
                 batch_norm_out, act=self._act)