diff --git a/python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py b/python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py
index e5c32d0003835712bd226812bfae3dbd88577825..480df7482e30517d9de27ffc7b26c241ab411d41 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_parallel_coalesce_split.py
@@ -22,6 +22,7 @@ import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.dygraph.parallel import DataParallel
 from paddle.fluid.dygraph.base import to_variable
+from paddle.fluid.dygraph.parallel import _coalesce_tensors, _split_tensors, _reshape_inplace
 
 
 class MyLayer(fluid.Layer):
@@ -57,8 +58,8 @@ class TestImperativeParallelCoalesceSplit(unittest.TestCase):
             orig_var_shapes.append(var.shape)
 
         # execute interface
-        coalesced_vars = test_layer._coalesce_tensors(var_groups)
-        test_layer._split_tensors(coalesced_vars)
+        coalesced_vars = _coalesce_tensors(var_groups)
+        _split_tensors(coalesced_vars)
 
         # compare
         for orig_var_shape, var in zip(orig_var_shapes, vars):
@@ -74,7 +75,7 @@ class TestImperativeParallelCoalesceSplit(unittest.TestCase):
         new_shape = [5, 10]
         x_data = np.random.random(ori_shape).astype("float32")
         x = to_variable(x_data)
-        test_layer._reshape_inplace(x, new_shape)
+        _reshape_inplace(x, new_shape)
 
         self.assertEqual(x.shape, new_shape)
 
diff --git a/python/paddle/optimizer/adam.py b/python/paddle/optimizer/adam.py
index 568f0b9d8f17ed32dcabf58ed4f868104c08c6c4..9cbb45ce60d1493c912928f073a9d82969c57c96 100644
--- a/python/paddle/optimizer/adam.py
+++ b/python/paddle/optimizer/adam.py
@@ -17,6 +17,7 @@
 from ..fluid import core
 from ..fluid import framework
 from ..fluid.framework import Variable
+import paddle
 from paddle.fluid.dygraph.parallel import apply_collective_grads
 
 __all__ = ["Adam"]
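
The test change above switches from calling `_coalesce_tensors`, `_split_tensors`, and `_reshape_inplace` as methods on the `DataParallel` layer instance to importing them as module-level functions from `paddle.fluid.dygraph.parallel`. For context, below is a minimal round-trip sketch of that module-level usage, assuming the post-change 1.x-era dygraph API; the shapes and the `var_groups` layout (group id mapped to a list of variables) are illustrative assumptions inferred from the test body shown in the diff, not taken verbatim from the PR.

# Minimal sketch: coalesce a group of variables into one flat buffer,
# then split back, checking the original shapes are restored
# (which is what the updated test asserts).
from collections import OrderedDict

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import _coalesce_tensors, _split_tensors

with fluid.dygraph.guard():
    # Illustrative shapes; the group layout (id -> list of variables)
    # is an assumption based on how the test builds `var_groups`.
    shapes = [[2, 3], [4, 9], [10, 1]]
    variables = [
        to_variable(np.random.random(s).astype("float32")) for s in shapes
    ]
    var_groups = OrderedDict()
    var_groups.setdefault(0, variables)

    # Coalesce each group, then split the flat buffers back in place.
    coalesced_vars = _coalesce_tensors(var_groups)
    _split_tensors(coalesced_vars)

    # After the round trip, every variable should have its original shape.
    for var, shape in zip(variables, shapes):
        assert list(var.shape) == shape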