Unverified commit 39ff0f9c, authored by Jiabin Yang, committed by GitHub

Optimze/optimize dygraph api (#19999)

* test=develop, fix docker with paddle nccl problem

* test=develop, Add Variable api and refine dygraph related API

* test=develop, Add Variable api and refine dygraph related API

* test=develop, refine test for new api and error info

* test=develop, refine error info and test_layers

* test=develop, add API.spec

* test=develop, fix to_string python2 and python3 compat error and refine doc

* test=develop, add API spec

* test=develop, update API spec

* test=develop, update API spec

* test=develop, invoke ci

* test=develop, fix example code

* test=develop, update API spec

* test=develop, add compat test and fix inplace compat dict error
Parent f5221ac1
-paddle.fluid.Program ('paddle.fluid.framework.Program', ('document', '7364a01d7b9132a435e46162c7fbd6c6'))
+paddle.fluid.Program ('paddle.fluid.framework.Program', ('document', '4f9e1829c89e0711355820e935d2b447'))
 paddle.fluid.Program.__init__ (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.Program.block (ArgSpec(args=['self', 'index'], varargs=None, keywords=None, defaults=None), ('document', '86cd9499e226be661a3d686260ee1150'))
+paddle.fluid.Program.block (ArgSpec(args=['self', 'index'], varargs=None, keywords=None, defaults=None), ('document', '28d066e432ceda86810b1e7deb8a4afa'))
-paddle.fluid.Program.clone (ArgSpec(args=['self', 'for_test'], varargs=None, keywords=None, defaults=(False,)), ('document', '11777d4121a64566a746e55497a4b78c'))
+paddle.fluid.Program.clone (ArgSpec(args=['self', 'for_test'], varargs=None, keywords=None, defaults=(False,)), ('document', '1e910e8c4186e8ff1afb62602f369033'))
-paddle.fluid.Program.current_block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'd601c7719e425e3d9cf862ea4ad194ca'))
+paddle.fluid.Program.current_block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '365e49ce9f346ac6d54265e29db447b5'))
-paddle.fluid.Program.global_block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'd64ea1dc96e9f674499ea3006d470aa4'))
+paddle.fluid.Program.global_block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'dd3f2b49147861d6ae48989a77482f05'))
-paddle.fluid.Program.list_vars (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '32c14b0f12baae4b352200fa09b5e789'))
+paddle.fluid.Program.list_vars (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '757cf8d083dff9507676b17376ac5af1'))
-paddle.fluid.Program.parse_from_string (ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None), ('document', 'b6a7ffb239a30bf2ce58cfaca8d8b8d5'))
+paddle.fluid.Program.parse_from_string (ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None), ('document', '70e063a0a09d5a8ed322db0d5de9edb4'))
-paddle.fluid.Program.to_string (ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,)), ('document', '89acca639baf00f3ad08b9d827e81706'))
+paddle.fluid.Program.to_string (ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,)), ('document', '6dfb00cd50eb515dcf2548a68ea94bfb'))
-paddle.fluid.default_startup_program (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'ba609cb02e4e55e8d626723567ef1778'))
+paddle.fluid.default_startup_program (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'accb52b28228f8e93a26fabdc960f56c'))
 paddle.fluid.default_main_program (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '853718df675e59aea7104f3d61bbf11d'))
 paddle.fluid.program_guard (ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,)), ('document', '78fb5c7f70ef76bcf4a1862c3f6b8191'))
 paddle.fluid.name_scope (ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,)), ('document', '917d313881ff990de5fb18d98a9c7b42'))
@@ -16,6 +16,15 @@ paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=N
 paddle.fluid.cuda_pinned_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c2562241744aabe3fff1b59af22dd281'))
 paddle.fluid.in_dygraph_mode (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '301bae0d8e02cc9eec5be02f052f11c6'))
 paddle.fluid.is_compiled_with_cuda (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '60c7f107a5050aeb58bb74eb175672b5'))
+paddle.fluid.Variable ('paddle.fluid.framework.Variable', ('document', '65ff735c2b96673d7131f5ff6b0db40c'))
+paddle.fluid.Variable.__init__ (ArgSpec(args=['self', 'block', 'type', 'name', 'shape', 'dtype', 'lod_level', 'capacity', 'persistable', 'error_clip', 'stop_gradient', 'is_data', 'need_check_feed'], varargs=None, keywords='kwargs', defaults=(VarType.LOD_TENSOR, None, None, None, None, None, None, None, False, False, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.Variable.astype (ArgSpec(args=['self', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '78541af4039262ed7ce3c447f8cc9cc1'))
+paddle.fluid.Variable.backward (ArgSpec(args=['self', 'backward_strategy'], varargs=None, keywords=None, defaults=(None,)), ('document', 'cb928fa194da09694f4267f0a25268f1'))
+paddle.fluid.Variable.clear_gradient (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '509a96d23c876fc5bfb10e1147e21d5f'))
+paddle.fluid.Variable.detach (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '0730b2d310b014d9b0a903b2034757d7'))
+paddle.fluid.Variable.gradient (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '86b246bfaf20f3058e91927abbcf9fb9'))
+paddle.fluid.Variable.numpy (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '7536e8feb56d827875943e7f01d406fc'))
+paddle.fluid.Variable.to_string (ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,)), ('document', '31f359a2c074f26dc0ffff296fc3983f'))
 paddle.fluid.Executor ('paddle.fluid.executor.Executor', ('document', '34e8c1769313fbeff7817212dda6259e'))
 paddle.fluid.Executor.__init__ (ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.Executor.close (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '3a584496aa1343f36eebf3c46b323a74'))
@@ -573,7 +582,7 @@ paddle.fluid.dygraph.Layer.parameters (ArgSpec(args=['self', 'include_sublayers'
 paddle.fluid.dygraph.Layer.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.dygraph.Layer.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
 paddle.fluid.dygraph.Layer.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.__impl__ (ArgSpec(args=['func'], varargs=None, keywords=None, defaults=()), ('document', 'fa71ad4e6c2b5bf2b5258bd1959f9b2a'))
+paddle.fluid.dygraph.__impl__ (ArgSpec(args=['func'], varargs=None, keywords=None, defaults=()), ('document', '75d1d3afccc8b39cdebf05cb1f5969f9'))
 paddle.fluid.dygraph.guard (ArgSpec(args=['place'], varargs=None, keywords=None, defaults=(None,)), ('document', '7071320ffe2eec9aacdae574951278c6'))
 paddle.fluid.dygraph.to_variable (ArgSpec(args=['value', 'block', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '0e69fa3666f15dd01b6e3e270b9371cd'))
 paddle.fluid.dygraph.Conv2D ('paddle.fluid.dygraph.nn.Conv2D', ('document', 'baafe7ae0d3a61ae79cf4c7443e2c37c'))
...
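The block of paddle.fluid.Variable entries above is new in this commit and documents the dygraph-facing surface of Variable. As a quick illustration of how those methods fit together, a minimal sketch, assuming a Paddle 1.x build; this is illustrative and not code from the commit:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.ones([2, 2], dtype='float32'))
    y = fluid.layers.reduce_sum(fluid.layers.relu(x))
    y.backward()             # run autograd from y
    grad = x.gradient()      # gradient of y w.r.t. x, as an ndarray
    value = y.numpy()        # fetch the Variable's value as an ndarray
    x.clear_gradient()       # drop accumulated gradients
    y_detached = y.detach()  # a Variable cut off from the autograd graph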
@@ -72,6 +72,18 @@ def to_text(obj, encoding='utf-8', inplace=False):
             return obj
         else:
             return set([_to_text(item, encoding) for item in obj])
+    elif isinstance(obj, dict):
+        if inplace:
+            new_obj = {}
+            for key, value in six.iteritems(obj):
+                new_obj[_to_text(key, encoding)] = _to_text(value, encoding)
+            obj.update(new_obj)
+            return obj
+        else:
+            new_obj = {}
+            for key, value in six.iteritems(obj):
+                new_obj[_to_text(key, encoding)] = _to_text(value, encoding)
+            return new_obj
     else:
         return _to_text(obj, encoding)
@@ -99,6 +111,8 @@ def _to_text(obj, encoding):
         return obj.decode(encoding)
     elif isinstance(obj, six.text_type):
         return obj
+    elif isinstance(obj, (bool, float)):
+        return obj
     else:
         return six.u(obj)
...
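The new dict branch mirrors the existing list/set handling: with inplace=False it builds and returns a fresh dict with keys and values converted to text, and with inplace=True it updates and returns the dict it was given; the _to_text change additionally lets bool and float values pass through untouched. A small usage sketch, assuming the module is importable as paddle.compat (as the tests below do via cpt); not code from this commit:

import paddle.compat as cpt

d = {"name": b"paddle"}
copy = cpt.to_text(d, inplace=False)  # {'name': 'paddle'}, a new dict object
same = cpt.to_text(d, inplace=True)   # converts in place and returns d itself
assert same is d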
@@ -21,7 +21,6 @@ import functools
 from . import layers
 from . import framework
 from . import core
-from .dygraph.base import _not_support
 
 __all__ = [
     'set_gradient_clip',
@@ -337,7 +336,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
         return param, new_grad
 
-@_not_support
+@framework.dygraph_not_support
 def set_gradient_clip(clip, param_list=None, program=None):
     """
     To specify parameters that require gradient clip.
...
@@ -45,21 +45,12 @@ def _switch_tracer_mode_guard_(is_train=True):
         yield
 
-def _dygraph_not_support_(func):
-    def __impl__(*args, **kwargs):
-        assert not framework.in_dygraph_mode(
-        ), "We don't support %s in Dygraph mode" % func.__name__
-        return func(*args, **kwargs)
-
-    return __impl__
-
 def _no_grad_(func):
     """
     This Decorator will avoid the func being decorated creating backward network in dygraph mode
 
-    Args:
-        func: the func don't need grad
+    Parameter:
+        - **func** (python func): the func don't need grad
 
     Examples:
@@ -92,7 +83,6 @@ def _no_grad_(func):
 no_grad = wrap_decorator(_no_grad_)
 # for fluidDoc
 no_grad.__doc__ = _no_grad_.__doc__
-_not_support = wrap_decorator(_dygraph_not_support_)
 
 @signature_safe_contextmanager
@@ -157,6 +147,7 @@ def _print_debug_msg(limit=5, is_test=False):
     return unique_name_size, tracer_var_size, alive_cpp_var_size
 
+@framework.dygraph_only
 def to_variable(value, block=None, name=None):
    """
    This function will create a variable from ndarray
...
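The relocated decorators now live in framework.py, whose diff is collapsed just below. As an illustration only, reconstructed from the removed _dygraph_not_support_ body above and the assertion message exercised in test_detach further down; the actual framework.py code may differ:

from paddle.fluid.framework import in_dygraph_mode  # the real tracer check

def dygraph_not_support(func):
    # Refuse to run func while a dygraph tracer is active.
    def __impl__(*args, **kwargs):
        assert not in_dygraph_mode(
        ), "We don't support %s in Dygraph mode" % func.__name__
        return func(*args, **kwargs)

    return __impl__

def dygraph_only(func):
    # Mirror image: func may only run inside fluid.dygraph.guard().
    def __impl__(*args, **kwargs):
        assert in_dygraph_mode(
        ), "We Only support %s in Dygraph mode, please use fluid.dygraph.guard() as context to run it in Dygraph Mode" % func.__name__
        return func(*args, **kwargs)

    return __impl__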
This diff is collapsed.
@@ -135,6 +135,22 @@ class TestCompatible(unittest.TestCase):
             self.assertEqual(l, l2)
             self.assertEqual(set([u"", u"123", u"321"]), l2)
 
+            # check dict types, not inplace
+            l = {"": ""}
+            l2 = cpt.to_text(l, inplace=False)
+            self.assertTrue(isinstance(l2, dict))
+            self.assertFalse(l is l2)
+            self.assertEqual(l, l2)
+            self.assertEqual({"": ""}, l2)
+
+            # check dict types, inplace
+            l = {"": ""}
+            l2 = cpt.to_text(l, inplace=True)
+            self.assertTrue(isinstance(l2, dict))
+            self.assertTrue(l is l2)
+            self.assertEqual(l, l2)
+            self.assertEqual({"": ""}, l2)
         elif six.PY3:
             self.assertIsNone(cpt.to_text(None))
@@ -236,6 +252,22 @@ class TestCompatible(unittest.TestCase):
             for i in l2:
                 self.assertTrue(isinstance(i, str))
 
+            # check dict types, not inplace
+            l = {"": ""}
+            l2 = cpt.to_text(l, inplace=False)
+            self.assertTrue(isinstance(l2, dict))
+            self.assertFalse(l is l2)
+            self.assertEqual(l, l2)
+            self.assertEqual({"": ""}, l2)
+
+            # check dict types, inplace
+            l = {"": ""}
+            l2 = cpt.to_text(l, inplace=True)
+            self.assertTrue(isinstance(l2, dict))
+            self.assertTrue(l is l2)
+            self.assertEqual(l, l2)
+            self.assertEqual({"": ""}, l2)
+
     def test_to_bytes(self):
         # Only support python2.x and python3.x now
         self.assertTrue(six.PY2 | six.PY3)
...
@@ -155,8 +155,11 @@ class Test_Detach(unittest.TestCase):
         try:
             y_detach = y.detach()
         except Exception as e:
-            assert type(e) == AttributeError
-            assert str(e) == 'static graph model DO NOT supprt detach'
+            # Here is to check
+            assert type(e) == AssertionError
+            assert str(
+                e
+            ) == 'We Only support detach in Dygraph mode, please use fluid.dygraph.guard() as context to run it in Dygraph Mode'
 
 if __name__ == '__main__':
...
@@ -207,6 +207,59 @@ class TestImperative(unittest.TestCase):
             a = inputs2[0].gradient()
             self.assertTrue(np.allclose(inputs2[0].gradient(), x))
 
+    def test_empty_var(self):
+        with fluid.dygraph.guard():
+            cur_program = fluid.Program()
+            cur_block = cur_program.current_block()
+            new_variable = cur_block.create_var(
+                name="X", shape=[-1, 23, 48], dtype='float32')
+            try:
+                new_variable.numpy()
+            except Exception as e:
+                assert type(e) == ValueError
+
+            try:
+                new_variable.backward()
+            except Exception as e:
+                assert type(e) == ValueError
+
+            try:
+                new_variable.clear_gradient()
+            except Exception as e:
+                assert type(e) == ValueError
+
+    def test_empty_grad(self):
+        with fluid.dygraph.guard():
+            x = np.ones([2, 2], np.float32)
+            new_var = fluid.dygraph.base.to_variable(x)
+            try:
+                new_var.gradient()
+            except Exception as e:
+                assert type(e) == ValueError
+
+            try:
+                new_var.clear_gradient()
+            except Exception as e:
+                assert type(e) == ValueError
+
+        with fluid.dygraph.guard():
+            cur_program = fluid.Program()
+            cur_block = cur_program.current_block()
+            new_variable = cur_block.create_var(
+                name="X", shape=[-1, 23, 48], dtype='float32')
+            try:
+                new_variable.gradient()
+            except Exception as e:
+                assert type(e) == ValueError
+
+    def test_set_persistable(self):
+        with fluid.dygraph.guard():
+            x = np.ones([2, 2], np.float32)
+            new_var = fluid.dygraph.base.to_variable(x)
+            self.assertFalse(new_var.persistable)
+            new_var.persistable = True
+            self.assertFalse(new_var.persistable)
+
     def test_layer(self):
         with fluid.dygraph.guard():
             cl = core.Layer()
...
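One observation on the try/except pattern in the new tests above: if the call unexpectedly succeeds, the test still passes, because nothing asserts that an exception was actually raised. A hypothetical tightening using assertRaises, shown as a standalone sketch and not part of this commit:

import unittest

import paddle.fluid as fluid


class TestEmptyVarStrict(unittest.TestCase):
    # assertRaises fails the test if no exception occurs,
    # which the bare try/except form does not.
    def test_numpy_on_empty_var(self):
        with fluid.dygraph.guard():
            block = fluid.Program().current_block()
            var = block.create_var(
                name="X", shape=[-1, 23, 48], dtype='float32')
            with self.assertRaises(ValueError):
                var.numpy()


if __name__ == '__main__':
    unittest.main()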
@@ -15,6 +15,7 @@
 import paddle.fluid as fluid
 import paddle.fluid.framework as framework
 import unittest
 
 from test_imperative_base import new_program_scope
@@ -30,7 +31,7 @@ class TestTracerMode(unittest.TestCase):
         self.assertEqual(self.tracer._train_mode, False)
         return a
 
-    @fluid.dygraph.base._not_support
+    @framework.dygraph_not_support
     def not_support_func(self):
         return True
...
@@ -56,7 +56,7 @@ class TestDygraphFramework(unittest.TestCase):
                 out.backward()
                 raise AssertionError(
                     "backward should not be usable in static graph mode")
-            except ValueError as e:
+            except AssertionError as e:
                 self.assertTrue((e is not None))
 
     def test_dygraph_to_string(self):
...
@@ -135,6 +135,8 @@ class TestDygraphGNN(unittest.TestCase):
             adam.minimize(loss)
             model.clear_gradients()
+            loss_value = loss.numpy()
+            model_gc_weight_value = model.gc.weight.numpy()
 
         with fluid.dygraph.guard():
             fluid.default_startup_program().random_seed = seed
@@ -157,12 +159,14 @@ class TestDygraphGNN(unittest.TestCase):
             adam2 = AdamOptimizer(learning_rate=1e-3)
             adam2.minimize(loss2)
             model2.clear_gradients()
+            loss2_value = loss2.numpy()
+            model2_gc_weight_value = model2.gc.weight.numpy()
 
-        self.assertEqual(static_loss, loss.numpy())
-        self.assertTrue(np.allclose(static_weight, model.gc.weight.numpy()))
-        self.assertEqual(static_loss, loss2.numpy())
-        self.assertTrue(np.allclose(static_weight, model2.gc.weight.numpy()))
-        sys.stderr.write('%s %s\n' % (static_loss, loss.numpy()))
+        self.assertEqual(static_loss, loss_value)
+        self.assertTrue(np.allclose(static_weight, model_gc_weight_value))
+        self.assertEqual(static_loss, loss2_value)
+        self.assertTrue(np.allclose(static_weight, model2_gc_weight_value))
+        sys.stderr.write('%s %s\n' % (static_loss, loss_value))
 
 if __name__ == '__main__':
...
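The same mechanical refactor repeats through the remaining test diffs: values are pulled out of dygraph Variables with .numpy() while still inside the fluid.dygraph.guard() scope, and the assertions afterwards compare plain ndarrays. A toy sketch of the pattern, illustrative and not from this commit:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.ones([2, 2], dtype='float32'))
    out = fluid.layers.relu(x)
    out_value = out.numpy()  # materialize inside the guard

# later comparisons touch only numpy data, not live dygraph Variables
assert np.allclose(out_value, np.ones([2, 2], dtype='float32'))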
@@ -264,6 +264,10 @@ class TestDygraphPtbRnn(unittest.TestCase):
             for param in ptb_model.parameters():
                 dy_param_updated[param.name] = param.numpy()
 
+            dy_loss_value = dy_loss.numpy()
+            dy_last_cell_value = last_cell.numpy()
+            dy_last_hidden_value = last_hidden.numpy()
+
         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
@@ -330,11 +334,11 @@ class TestDygraphPtbRnn(unittest.TestCase):
                 static_param_updated[static_param_name_list[k -
                                                             3]] = out[k]
 
-        self.assertTrue(np.array_equal(static_loss_value, dy_loss.numpy()))
-        self.assertTrue(
-            np.array_equal(static_last_cell_value, last_cell.numpy()))
-        self.assertTrue(
-            np.array_equal(static_last_hidden_value, last_hidden.numpy()))
+        self.assertTrue(np.array_equal(static_loss_value, dy_loss_value))
+        self.assertTrue(
+            np.array_equal(static_last_cell_value, dy_last_cell_value))
+        self.assertTrue(
+            np.array_equal(static_last_hidden_value, dy_last_hidden_value))
         for key, value in six.iteritems(static_param_init):
             self.assertTrue(np.array_equal(value, dy_param_init[key]))
         for key, value in six.iteritems(static_param_updated):
...
@@ -84,6 +84,10 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
             for param in ptb_model.parameters():
                 dy_param_updated[param.name] = param.numpy()
 
+            dy_loss_value = dy_loss.numpy()
+            dy_last_cell_value = last_cell.numpy()
+            dy_last_hidden_value = last_hidden.numpy()
+
         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
@@ -150,11 +154,11 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
                 static_param_updated[static_param_name_list[k -
                                                             3]] = out[k]
 
-        self.assertTrue(np.array_equal(static_loss_value, dy_loss.numpy()))
-        self.assertTrue(
-            np.array_equal(static_last_cell_value, last_cell.numpy()))
-        self.assertTrue(
-            np.array_equal(static_last_hidden_value, last_hidden.numpy()))
+        self.assertTrue(np.array_equal(static_loss_value, dy_loss_value))
+        self.assertTrue(
+            np.array_equal(static_last_cell_value, dy_last_cell_value))
+        self.assertTrue(
+            np.array_equal(static_last_hidden_value, dy_last_hidden_value))
         for key, value in six.iteritems(static_param_init):
             self.assertTrue(np.array_equal(value, dy_param_init[key]))
         for key, value in six.iteritems(static_param_updated):
...
@@ -993,6 +993,11 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
                 for param in transformer.parameters():
                     dy_param_updated[param.name] = param.numpy()
 
+            dy_avg_cost_value = dy_avg_cost.numpy()
+            dy_sum_cost_value = dy_sum_cost.numpy()
+            dy_predict_value = dy_predict.numpy()
+            dy_token_num_value = dy_token_num.numpy()
+
         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
@@ -1067,13 +1072,12 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
                                                             4]] = out[k]
 
-        self.assertTrue(
-            np.array_equal(static_avg_cost_value, dy_avg_cost.numpy()))
-        self.assertTrue(
-            np.array_equal(static_sum_cost_value, dy_sum_cost.numpy()))
-        self.assertTrue(
-            np.array_equal(static_predict_value, dy_predict.numpy()))
-        self.assertTrue(
-            np.array_equal(static_token_num_value, dy_token_num.numpy()))
+        self.assertTrue(
+            np.array_equal(static_avg_cost_value, dy_avg_cost_value))
+        self.assertTrue(
+            np.array_equal(static_sum_cost_value, dy_sum_cost_value))
+        self.assertTrue(np.array_equal(static_predict_value, dy_predict_value))
+        self.assertTrue(
+            np.array_equal(static_token_num_value, dy_token_num_value))
 
         for key, value in six.iteritems(static_param_init):
             self.assertTrue(np.array_equal(value, dy_param_init[key]))
...
@@ -112,9 +112,10 @@ class TestLayer(LayerTest):
             fc2 = nn.FC('fc2', size=4)
             ret = fc1(t)
             dy_ret = fc2(ret)
+            dy_ret_value = dy_ret.numpy()
 
         self.assertTrue(np.array_equal(static_ret, static_ret2))
-        self.assertTrue(np.array_equal(static_ret, dy_ret.numpy()))
+        self.assertTrue(np.array_equal(static_ret, dy_ret_value))
 
     def test_layer_norm(self):
         inp = np.ones([3, 32, 32], dtype='float32')
@@ -149,6 +150,7 @@ class TestLayer(LayerTest):
                 bias_attr=fluid.initializer.ConstantInitializer(value=1),
                 act='sigmoid')
             dy_ret = lm(base.to_variable(inp))
+            dy_ret_value = dy_ret.numpy()
 
         with self.dynamic_graph():
             lm = nn.LayerNorm(
                 'layer_norm',
@@ -163,7 +165,7 @@ class TestLayer(LayerTest):
             self.assertFalse(hasattr(lm, "_bias_w"))
 
         self.assertTrue(np.array_equal(static_ret, static_ret2))
-        self.assertTrue(np.array_equal(dy_ret.numpy(), static_ret2))
+        self.assertTrue(np.array_equal(dy_ret_value, static_ret2))
 
     def test_relu(self):
         with self.static_graph():
@@ -176,8 +178,9 @@ class TestLayer(LayerTest):
         with self.dynamic_graph():
             t = np.ones([3, 3], dtype='float32')
             dy_ret = layers.relu(base.to_variable(t))
+            dy_ret_value = dy_ret.numpy()
 
-        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret_value))
 
     def test_matmul(self):
         with self.static_graph():
@@ -197,8 +200,9 @@ class TestLayer(LayerTest):
             t = np.ones([3, 3], dtype='float32')
             t2 = np.ones([3, 3], dtype='float32')
             dy_ret = layers.matmul(base.to_variable(t), base.to_variable(t2))
+            dy_ret_value = dy_ret.numpy()
 
-        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret_value))
 
     def test_conv2d(self):
         with self.static_graph():
@@ -222,6 +226,7 @@ class TestLayer(LayerTest):
             images = np.ones([2, 3, 5, 5], dtype='float32')
             conv2d = nn.Conv2D('conv2d', num_filters=3, filter_size=[2, 2])
             dy_ret = conv2d(base.to_variable(images))
+            dy_ret_value = dy_ret.numpy()
 
         with self.dynamic_graph():
             images = np.ones([2, 3, 5, 5], dtype='float32')
@@ -230,7 +235,7 @@ class TestLayer(LayerTest):
             dy_ret = conv2d(base.to_variable(images))
             self.assertTrue(conv2d._bias_param is None)
 
-        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret_value))
         self.assertTrue(np.allclose(static_ret, static_ret2))
 
     def test_gru_unit(self):
@@ -269,10 +274,13 @@ class TestLayer(LayerTest):
             gru = nn.GRUUnit('gru', size=D * 3)
             dy_ret = gru(
                 base.to_variable(input), base.to_variable(hidden_input))
+            dy_ret_value = []
+            for i in range(len(static_ret)):
+                dy_ret_value.append(dy_ret[i].numpy())
 
         for i in range(len(static_ret)):
             self.assertTrue(np.allclose(static_ret[i], static_ret2[i]))
-            self.assertTrue(np.allclose(static_ret[i], dy_ret[i].numpy()))
+            self.assertTrue(np.allclose(static_ret[i], dy_ret_value[i]))
 
     def test_elementwise_math(self):
         n = np.ones([3, 3], dtype='float32')
@@ -313,9 +321,8 @@ class TestLayer(LayerTest):
             ret = layers.elementwise_div(ret, n4)
             ret = layers.elementwise_sub(ret, n5)
             dy_ret = layers.elementwise_mul(ret, n6)
-        self.assertTrue(
-            np.allclose(static_ret, dy_ret.numpy()),
-            '%s vs %s' % (static_ret, dy_ret.numpy()))
+            dy_ret_value = dy_ret.numpy()
+
+        self.assertTrue(np.allclose(static_ret, dy_ret_value))
 
     def test_elementwise_minmax(self):
         n = np.ones([3, 3], dtype='float32')
@@ -324,9 +331,11 @@ class TestLayer(LayerTest):
         with self.dynamic_graph():
             min_ret = layers.elementwise_min(n, n2)
             max_ret = layers.elementwise_max(n, n2)
+            min_ret_value = min_ret.numpy()
+            max_ret_value = max_ret.numpy()
 
-        self.assertTrue(np.allclose(n, min_ret.numpy()))
-        self.assertTrue(np.allclose(n2, max_ret.numpy()))
+        self.assertTrue(np.allclose(n, min_ret_value))
+        self.assertTrue(np.allclose(n2, max_ret_value))
 
     def test_sequence_conv(self):
         inp_np = np.arange(12).reshape([3, 4]).astype('float32')
@@ -404,8 +413,9 @@ class TestLayer(LayerTest):
                 act='sigmoid',
                 bias_attr=fluid.initializer.ConstantInitializer(value=1))
             dy_rlt = conv2d_transpose(base.to_variable(inp_np))
+            dy_rlt_value = dy_rlt.numpy()
 
         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt2))
+        self.assertTrue(np.allclose(dy_rlt_value, static_rlt2))
 
     def test_bilinear_tensor_product(self):
         inp_np_x = np.array([[1, 2, 3]]).astype('float32')
@@ -460,12 +470,12 @@ class TestLayer(LayerTest):
                 bias_attr=fluid.initializer.ConstantInitializer(value=1),
                 act='sigmoid')
             dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))
+            dy_rlt_value = dy_rlt.numpy()
 
         with self.dynamic_graph():
             btp2 = nn.BilinearTensorProduct('btp', 6, act='sigmoid')
             dy_rlt2 = btp2(
                 base.to_variable(inp_np_x), base.to_variable(inp_np_y))
+            dy_rlt2_value = dy_rlt2.numpy()
 
         with self.static_graph():
             data_x2 = layers.data(
                 name='x',
@@ -484,9 +494,9 @@ class TestLayer(LayerTest):
                 feed={'x': inp_np_x,
                       'y': inp_np_y}, fetch_list=[out2])[0]
 
-        self.assertTrue(np.array_equal(dy_rlt2.numpy(), static_rlt3))
+        self.assertTrue(np.array_equal(dy_rlt2_value, static_rlt3))
         self.assertTrue(np.array_equal(static_rlt2, static_rlt))
-        self.assertTrue(np.array_equal(dy_rlt.numpy(), static_rlt))
+        self.assertTrue(np.array_equal(dy_rlt_value, static_rlt))
 
     def test_prelu(self):
         inp_np = np.ones([5, 200, 100, 100]).astype('float32')
@@ -525,9 +535,10 @@ class TestLayer(LayerTest):
                 mode=mode,
                 param_attr=ParamAttr(initializer=Constant(1.0)))
             dy_rlt = prelu(base.to_variable(inp_np))
+            dy_rlt_value = dy_rlt.numpy()
 
         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt))
+        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
 
     def test_embeding(self):
         inp_word = np.array([[[1]]]).astype('int64')
@@ -557,10 +568,11 @@ class TestLayer(LayerTest):
                 size=[dict_size, 32],
                 param_attr='emb.w',
                 is_sparse=False)
-            static_rlt3 = emb2(base.to_variable(inp_word))
+            dy_rlt = emb2(base.to_variable(inp_word))
+            dy_rlt_value = dy_rlt.numpy()
 
         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(static_rlt3.numpy(), static_rlt))
+        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
 
     def test_nce(self):
         window_size = 5
@@ -677,10 +689,11 @@ class TestLayer(LayerTest):
                 bias_attr='nce.b',
                 sample_weight=sample_weights)
-            nce_loss3 = nce(embs3, words[label_word])
+            dy_rlt = nce(embs3, words[label_word])
+            dy_rlt_value = dy_rlt.numpy()
 
         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(nce_loss3.numpy(), static_rlt))
+        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
 
     def test_conv3d(self):
         with self.static_graph():
@@ -706,8 +719,9 @@ class TestLayer(LayerTest):
             images = np.ones([2, 3, 6, 6, 6], dtype='float32')
             conv3d = nn.Conv3D('conv3d', num_filters=3, filter_size=2)
             dy_ret = conv3d(base.to_variable(images))
+            dy_rlt_value = dy_ret.numpy()
 
-        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_rlt_value))
         self.assertTrue(np.allclose(static_ret, static_ret2))
 
     def test_row_conv(self):
@@ -800,8 +814,9 @@ class TestLayer(LayerTest):
         with self.dynamic_graph():
             groupNorm = nn.GroupNorm('GroupNorm', groups=2)
            dy_ret = groupNorm(base.to_variable(input))
+            dy_rlt_value = dy_ret.numpy()
 
-        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_rlt_value))
         self.assertTrue(np.allclose(static_ret, static_ret2))
 
     def test_spectral_norm(self):
@@ -850,8 +865,9 @@ class TestLayer(LayerTest):
         with self.dynamic_graph():
             spectralNorm = nn.SpectralNorm('SpectralNorm', dim=1, power_iters=2)
             dy_ret = spectralNorm(base.to_variable(input))
+            dy_rlt_value = dy_ret.numpy()
 
-        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_rlt_value))
         self.assertTrue(np.allclose(static_ret, static_ret2))
 
     def test_tree_conv(self):
@@ -922,9 +938,10 @@ class TestLayer(LayerTest):
             treeConv = nn.TreeConv(
                 'SpectralNorm', output_size=6, num_filters=1, max_depth=2)
             dy_ret = treeConv(base.to_variable(vectors), base.to_variable(adj))
+            dy_rlt_value = dy_ret.numpy()
 
         self.assertTrue(np.allclose(static_ret, static_ret2))
-        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_rlt_value))
 
     def test_conv3d_transpose(self):
         input_array = np.arange(0, 48).reshape(
@@ -953,8 +970,9 @@ class TestLayer(LayerTest):
                 filter_size=12,
                 use_cudnn=False)
             dy_rlt = conv3d_transpose(base.to_variable(input_array))
+            dy_rlt_value = dy_rlt.numpy()
 
         self.assertTrue(np.allclose(static_rlt2, static_rlt))
-        self.assertTrue(np.allclose(dy_rlt.numpy(), static_rlt))
+        self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
 
     def test_eye_op(self):
         np_eye = np.eye(3, 2)
@@ -972,11 +990,14 @@ class TestLayer(LayerTest):
                 num_columns=2,
                 batch_shape=[4, 3])
             diag_tensor = layers.eye(20)
+            eye_tensor_value = eye_tensor.numpy()
+            eye_tensor_rlt1_value = eye_tensor_rlt1.numpy()
+            eye_tensor_rlt2_value = eye_tensor_rlt2.numpy()
+            diag_tensor_value = diag_tensor.numpy()
 
-        self.assertTrue(np.allclose(eye_tensor.numpy(), np_eye))
-        self.assertTrue(np.allclose(eye_tensor_rlt1.numpy(), stack_rlt1))
-        self.assertTrue(np.allclose(eye_tensor_rlt2.numpy(), stack_rlt2))
-        self.assertTrue(np.allclose(diag_tensor.numpy(), np.eye(20)))
+        self.assertTrue(np.allclose(eye_tensor_value, np_eye))
+        self.assertTrue(np.allclose(eye_tensor_rlt1_value, stack_rlt1))
+        self.assertTrue(np.allclose(eye_tensor_rlt2_value, stack_rlt2))
+        self.assertTrue(np.allclose(diag_tensor_value, np.eye(20)))
 
         with self.assertRaises(TypeError):
             layers.eye(num_rows=3.1)
@@ -998,8 +1019,9 @@ class TestLayer(LayerTest):
         with self.dynamic_graph():
             t = np.ones([3, 3], dtype='float32')
             dy_ret = layers.hard_swish(base.to_variable(t))
+            dy_ret_rlt = dy_ret.numpy()
 
-        self.assertTrue(np.allclose(static_ret, dy_ret.numpy()))
+        self.assertTrue(np.allclose(static_ret, dy_ret_rlt))
 
     def test_compare(self):
         value_a = np.arange(3)
@@ -1160,8 +1182,9 @@ class TestBook(LayerTest):
             dy_result = method()
             if isinstance(dy_result, tuple):
                 dy_result = dy_result[0]
+            dy_result_value = dy_result.numpy()
 
-            self.assertTrue(np.array_equal(static_result[0], dy_result.numpy()))
+            self.assertTrue(np.array_equal(static_result[0], dy_result_value))
 
     def _get_np_data(self, shape, dtype, append_batch_size=True):
         np.random.seed(self.seed)
...
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import paddle.fluid as fluid
import unittest


class TestProgram(unittest.TestCase):
    def test_program_to_string(self):
        prog = fluid.default_main_program()
        a = fluid.layers.data(
            name="X", shape=[2, 3], dtype="float32", append_batch_size=False)
        c = fluid.layers.fc(a, size=3)
        prog_string = prog.to_string(throw_on_error=True, with_details=False)
        prog_string_with_details = prog.to_string(
            throw_on_error=False, with_details=True)
        assert prog_string is not None
        assert len(prog_string_with_details) > len(prog_string)


if __name__ == '__main__':
    unittest.main()