diff --git a/paddle/fluid/pybind/op_function_generator.h b/paddle/fluid/pybind/op_function_generator.h
index d8750c1d6c115a6de8a493cac4ccadbd47bc10fd..0a389153b0ee4bc5de3309a4184afb02871cbccd 100644
--- a/paddle/fluid/pybind/op_function_generator.h
+++ b/paddle/fluid/pybind/op_function_generator.h
@@ -89,6 +89,7 @@ std::map<std::string, std::vector<std::string>> op_ins_map = {
      {"Input", "Label", "Weight", "Bias", "SampleWeight", "CustomDistProbs",
       "CustomDistAlias", "CustomDistAliasProbs"}},
     {"check_finite_and_unscale", {"X", "Scale", "FloatStatus"}},
+    {"group_norm", {"X", "Scale", "Bias"}},
 };

 // NOTE(zhiqiu): Like op_ins_map.
diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 5bb1aef6d6e9b96a8492fe9fc76c7448a053e3bf..b41e3e0b502b591fe0c86fa2a48b99402cba68fe 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -2986,6 +2986,12 @@ class GroupNorm(layers.Layer):
             is_bias=True)

     def forward(self, input):
+        if in_dygraph_mode():
+            attrs = ('epsilon', self._epsilon, 'groups', self._groups)
+            out, _, _ = _C_ops.group_norm(input, self.weight, self.bias, *attrs)
+
+            return dygraph_utils._append_activation_in_dygraph(out, self._act)
+
         inputs = {'X': input}
         if self.bias is not None:
             inputs['Bias'] = self.bias
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 36038d656b7736afc94da32c29c56ce61b338cb4..bb244a20bd873d34c6f01a4ec5a8b87018d71668 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -1819,7 +1819,7 @@ class TestLayer(LayerTest):

         self.assertTrue(np.allclose(static_ret, static_ret2))

-    def test_group_norm(self):
+    def func_group_norm(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
         else:
@@ -1873,7 +1873,6 @@ class TestLayer(LayerTest):
                 with_lod=True)[0]

         with self.dynamic_graph():
-            # TODO(wuweilong): Add with _test_eager_guard():
             groupNorm = nn.GroupNorm(
                 channels=shape[1],
                 groups=2,
@@ -1886,6 +1885,11 @@ class TestLayer(LayerTest):
         self.assertTrue(np.allclose(static_ret, dy_rlt_value))
         self.assertTrue(np.allclose(static_ret, static_ret2))

+    def test_group_norm(self):
+        with _test_eager_guard():
+            self.func_group_norm()
+        self.func_group_norm()
+
     def test_instance_norm(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
@@ -2348,7 +2352,7 @@ class TestLayer(LayerTest):
         with self.assertRaises(TypeError):
             layers.eye(num_rows=3, batch_shape=[-1])

-    def test_while_loop(self):
+    def func_while_loop(self):
         with self.static_graph():
             i = layers.fill_constant(shape=[1], dtype='int64', value=0)
             ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
@@ -2363,7 +2367,6 @@ class TestLayer(LayerTest):
             static_ret = self.get_static_graph_result(feed={}, fetch_list=out)

         with self.dynamic_graph():
-            # TODO(wuweilong): Add with _test_eager_guard():
             i = layers.fill_constant(shape=[1], dtype='int64', value=0)
             ten = layers.fill_constant(shape=[1], dtype='int64', value=10)

@@ -2384,6 +2387,11 @@ class TestLayer(LayerTest):

         self.assertTrue(np.array_equal(static_ret[0], dy_ret[0].numpy()))

+    def test_while_loop(self):
+        with _test_eager_guard():
+            self.func_while_loop()
+        self.func_while_loop()
+
     def test_compare(self):
         value_a = np.arange(3)
         value_b = np.arange(3)
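
Usage sketch (not part of the patch): a minimal example of how the new dygraph fast path in GroupNorm.forward could be exercised. The input shape, channel count, and group count are arbitrary example values chosen to mirror the unit test.

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph import nn

    # Running under dygraph guard makes forward() take the new
    # _C_ops.group_norm branch instead of building a static-graph op.
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(
            np.random.random((2, 4, 3, 3)).astype('float32'))
        group_norm = nn.GroupNorm(channels=4, groups=2)
        out = group_norm(x)
        print(out.shape)  # group_norm preserves the input shape: [2, 4, 3, 3]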