Commit 54e07994 authored by Youwei Song, committed by Tao Luo

Dygraph Layer kwargs & param getter setter (#19901)

* opt FC

* opt rest of dygraph.nn

* new param shape check and unittest

* add kwargs for Layer

* add new set_value api

* use property decorator

* update API.spec, test=develop

* use UserList, separate getter/setters, test=develop

* update test_custom_layer_with_kwargs, test=develop

* fix UserList compatibility, test=develop

* fix UserList compatibility, test=develop

* keep FC._w, test=develop

* add unittests, Conv3D bug fix, test=develop

* clean code, test=develop

* fix dygraph guard in unittest, test=develop

* add property setters, remove unused param in tracer, test=develop

* tracer none check, test=develop

* merge, test=develop

* refine, test=develop

* bug fix in prelu and conv3d_transpose, test=develop

* rm __set__, test=develop

* set tensor value instead of assign op

* fix property setter call, test=develop

* fix api.spec, test=develop

* fix doc sample, test=develop
Parent 9de67725
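Taken together, this commit threads `**kwargs` through dygraph `Layer.__call__`/`forward`, adds a `Variable.set_value` API, and exposes layer parameters through `weight`/`bias` properties. A minimal sketch of the resulting user-facing API, assuming a dygraph `Conv2D` layer (the layer name and shapes here are illustrative, not part of the commit):

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D

with fluid.dygraph.guard():
    conv = Conv2D("conv2d", num_filters=2, filter_size=3)
    x = fluid.dygraph.to_variable(
        np.random.random([1, 3, 8, 8]).astype('float32'))
    y = conv(x)  # parameters are built lazily on the first call

    w = conv.weight  # parameter getter added in this commit
    # new set_value API: writes the tensor in place instead of
    # appending an assign op
    w.set_value(np.zeros(w.shape).astype('float32'))
```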
@@ -24,6 +24,7 @@ paddle.fluid.Variable.clear_gradient (ArgSpec(args=['self'], varargs=None, keywo
paddle.fluid.Variable.detach (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '0730b2d310b014d9b0a903b2034757d7'))
paddle.fluid.Variable.gradient (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '86b246bfaf20f3058e91927abbcf9fb9'))
paddle.fluid.Variable.numpy (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '7536e8feb56d827875943e7f01d406fc'))
+paddle.fluid.Variable.set_value (ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None), ('document', 'c424b9e763ff51c38a6917f98026fe7d'))
paddle.fluid.Variable.to_string (ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,)), ('document', '31f359a2c074f26dc0ffff296fc3983f'))
paddle.fluid.Executor ('paddle.fluid.executor.Executor', ('document', '34e8c1769313fbeff7817212dda6259e'))
paddle.fluid.Executor.__init__ (ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -575,7 +576,7 @@ paddle.fluid.dygraph.Layer.clear_gradients (ArgSpec(args=['self'], varargs=None,
paddle.fluid.dygraph.Layer.create_parameter (ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'a6420ca1455366eaaf972191612de0b6'))
paddle.fluid.dygraph.Layer.create_variable (ArgSpec(args=['self', 'name', 'persistable', 'dtype', 'type'], varargs=None, keywords=None, defaults=(None, None, None, VarType.LOD_TENSOR)), ('document', '171cccfceba636d5bbf7bbae672945d8'))
paddle.fluid.dygraph.Layer.eval (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.Layer.forward (ArgSpec(args=['self'], varargs='inputs', keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.dygraph.Layer.forward (ArgSpec(args=['self'], varargs='inputs', keywords='kwargs', defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Layer.full_name (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '23ce4f961f48ed0f79cadf93a3938ed2'))
paddle.fluid.dygraph.Layer.load_dict (ArgSpec(args=['self', 'stat_dict', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Layer.parameters (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '5aec25a854eb57abc798dccccbb507d5'))
@@ -585,7 +586,7 @@ paddle.fluid.dygraph.Layer.train (ArgSpec(args=['self'], varargs=None, keywords=
paddle.fluid.dygraph.__impl__ (ArgSpec(args=['func'], varargs=None, keywords=None, defaults=()), ('document', '75d1d3afccc8b39cdebf05cb1f5969f9'))
paddle.fluid.dygraph.guard (ArgSpec(args=['place'], varargs=None, keywords=None, defaults=(None,)), ('document', '7071320ffe2eec9aacdae574951278c6'))
paddle.fluid.dygraph.to_variable (ArgSpec(args=['value', 'block', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '0e69fa3666f15dd01b6e3e270b9371cd'))
-paddle.fluid.dygraph.Conv2D ('paddle.fluid.dygraph.nn.Conv2D', ('document', 'baafe7ae0d3a61ae79cf4c7443e2c37c'))
+paddle.fluid.dygraph.Conv2D ('paddle.fluid.dygraph.nn.Conv2D', ('document', '0b6acb9cc7fbb4f5b129e1f6dd985581'))
paddle.fluid.dygraph.Conv2D.__init__ (ArgSpec(args=['self', 'name_scope', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'dtype'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv2D.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Conv2D.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -601,7 +602,7 @@ paddle.fluid.dygraph.Conv2D.parameters (ArgSpec(args=['self', 'include_sublayers
paddle.fluid.dygraph.Conv2D.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv2D.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Conv2D.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.Conv3D ('paddle.fluid.dygraph.nn.Conv3D', ('document', '8b756aaca65af9594cc574d9a5d2b055'))
+paddle.fluid.dygraph.Conv3D ('paddle.fluid.dygraph.nn.Conv3D', ('document', '50412bd3fbf3557a8ef48e25c6517025'))
paddle.fluid.dygraph.Conv3D.__init__ (ArgSpec(args=['self', 'name_scope', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv3D.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Conv3D.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -617,7 +618,7 @@ paddle.fluid.dygraph.Conv3D.parameters (ArgSpec(args=['self', 'include_sublayers
paddle.fluid.dygraph.Conv3D.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv3D.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Conv3D.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.Pool2D ('paddle.fluid.dygraph.nn.Pool2D', ('document', 'e9331666e47a38586c8809a23cbaf7de'))
+paddle.fluid.dygraph.Pool2D ('paddle.fluid.dygraph.nn.Pool2D', ('document', '50e6fd200e42859daf2924ecb0561ada'))
paddle.fluid.dygraph.Pool2D.__init__ (ArgSpec(args=['self', 'name_scope', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'exclusive', 'dtype'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, True, VarType.FP32)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Pool2D.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Pool2D.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -633,7 +634,7 @@ paddle.fluid.dygraph.Pool2D.parameters (ArgSpec(args=['self', 'include_sublayers
paddle.fluid.dygraph.Pool2D.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Pool2D.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Pool2D.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.FC ('paddle.fluid.dygraph.nn.FC', ('document', '1d64242f03f2aca2307e94590b552430'))
+paddle.fluid.dygraph.FC ('paddle.fluid.dygraph.nn.FC', ('document', '2f73ae00e57c67454c6aa7e911d9bfd6'))
paddle.fluid.dygraph.FC.__init__ (ArgSpec(args=['self', 'name_scope', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'dtype'], varargs=None, keywords=None, defaults=(1, None, None, None, False, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.FC.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.FC.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -649,7 +650,7 @@ paddle.fluid.dygraph.FC.parameters (ArgSpec(args=['self', 'include_sublayers'],
paddle.fluid.dygraph.FC.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.FC.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.FC.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.BatchNorm ('paddle.fluid.dygraph.nn.BatchNorm', ('document', '0b609e10e4d417c91d346f887d16771c'))
+paddle.fluid.dygraph.BatchNorm ('paddle.fluid.dygraph.nn.BatchNorm', ('document', '390fb9b986423ec6680731ffc7cf24ab'))
paddle.fluid.dygraph.BatchNorm.__init__ (ArgSpec(args=['self', 'name_scope', 'num_channels', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'dtype', 'data_layout', 'in_place', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats', 'trainable_statistics'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'float32', 'NCHW', False, None, None, False, False, False, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.BatchNorm.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.BatchNorm.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -665,7 +666,7 @@ paddle.fluid.dygraph.BatchNorm.parameters (ArgSpec(args=['self', 'include_sublay
paddle.fluid.dygraph.BatchNorm.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.BatchNorm.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.BatchNorm.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.Embedding ('paddle.fluid.dygraph.nn.Embedding', ('document', 'ecf8dc4149f005cd30eddc0dd343454f'))
+paddle.fluid.dygraph.Embedding ('paddle.fluid.dygraph.nn.Embedding', ('document', 'b1b1ed9dc2125c3e16ee08113605fcb4'))
paddle.fluid.dygraph.Embedding.__init__ (ArgSpec(args=['self', 'name_scope', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Embedding.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Embedding.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -681,7 +682,7 @@ paddle.fluid.dygraph.Embedding.parameters (ArgSpec(args=['self', 'include_sublay
paddle.fluid.dygraph.Embedding.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Embedding.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Embedding.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.GRUUnit ('paddle.fluid.dygraph.nn.GRUUnit', ('document', '5308e42b6a6db4681ce5ee9e94983986'))
+paddle.fluid.dygraph.GRUUnit ('paddle.fluid.dygraph.nn.GRUUnit', ('document', '389e860e455b67aab1f4d472ac9d7e49'))
paddle.fluid.dygraph.GRUUnit.__init__ (ArgSpec(args=['self', 'name_scope', 'size', 'param_attr', 'bias_attr', 'activation', 'gate_activation', 'origin_mode', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 'tanh', 'sigmoid', False, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.GRUUnit.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.GRUUnit.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -697,7 +698,7 @@ paddle.fluid.dygraph.GRUUnit.parameters (ArgSpec(args=['self', 'include_sublayer
paddle.fluid.dygraph.GRUUnit.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.GRUUnit.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.GRUUnit.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.LayerNorm ('paddle.fluid.dygraph.nn.LayerNorm', ('document', 'b44f5d3d10386c460094e21f24ff272b'))
+paddle.fluid.dygraph.LayerNorm ('paddle.fluid.dygraph.nn.LayerNorm', ('document', '8bc39f59fe2d3713bc143fdf1222a63b'))
paddle.fluid.dygraph.LayerNorm.__init__ (ArgSpec(args=['self', 'name_scope', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.LayerNorm.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.LayerNorm.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -713,7 +714,7 @@ paddle.fluid.dygraph.LayerNorm.parameters (ArgSpec(args=['self', 'include_sublay
paddle.fluid.dygraph.LayerNorm.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.LayerNorm.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.LayerNorm.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.NCE ('paddle.fluid.dygraph.nn.NCE', ('document', '2d579e8d9ce31bb29e079e5f6108fc73'))
+paddle.fluid.dygraph.NCE ('paddle.fluid.dygraph.nn.NCE', ('document', '993aeea9be436e9c709a758795cb23e9'))
paddle.fluid.dygraph.NCE.__init__ (ArgSpec(args=['self', 'name_scope', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples', 'sampler', 'custom_dist', 'seed', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, 'uniform', None, 0, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.NCE.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.NCE.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -729,7 +730,7 @@ paddle.fluid.dygraph.NCE.parameters (ArgSpec(args=['self', 'include_sublayers'],
paddle.fluid.dygraph.NCE.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.NCE.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.NCE.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.PRelu ('paddle.fluid.dygraph.nn.PRelu', ('document', 'd395ed163b4cf86e7207968f27bc1c11'))
+paddle.fluid.dygraph.PRelu ('paddle.fluid.dygraph.nn.PRelu', ('document', 'da956af1676b08bf15553751a3643b55'))
paddle.fluid.dygraph.PRelu.__init__ (ArgSpec(args=['self', 'name_scope', 'mode', 'param_attr'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.PRelu.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.PRelu.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -745,7 +746,7 @@ paddle.fluid.dygraph.PRelu.parameters (ArgSpec(args=['self', 'include_sublayers'
paddle.fluid.dygraph.PRelu.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.PRelu.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.PRelu.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.BilinearTensorProduct ('paddle.fluid.dygraph.nn.BilinearTensorProduct', ('document', '310140d784933928a27db9a7af4761e8'))
+paddle.fluid.dygraph.BilinearTensorProduct ('paddle.fluid.dygraph.nn.BilinearTensorProduct', ('document', 'be70d0f6d43729d9cb80c9a34ed5f26b'))
paddle.fluid.dygraph.BilinearTensorProduct.__init__ (ArgSpec(args=['self', 'name_scope', 'size', 'name', 'act', 'param_attr', 'bias_attr'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.BilinearTensorProduct.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.BilinearTensorProduct.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -761,7 +762,7 @@ paddle.fluid.dygraph.BilinearTensorProduct.parameters (ArgSpec(args=['self', 'in
paddle.fluid.dygraph.BilinearTensorProduct.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.BilinearTensorProduct.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.BilinearTensorProduct.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.Conv2DTranspose ('paddle.fluid.dygraph.nn.Conv2DTranspose', ('document', '918fa8ad8a62ff424c842fb8a840bf7a'))
+paddle.fluid.dygraph.Conv2DTranspose ('paddle.fluid.dygraph.nn.Conv2DTranspose', ('document', 'cf23c905abc00b07603dfa71a432d6f7'))
paddle.fluid.dygraph.Conv2DTranspose.__init__ (ArgSpec(args=['self', 'name_scope', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv2DTranspose.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Conv2DTranspose.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -777,7 +778,7 @@ paddle.fluid.dygraph.Conv2DTranspose.parameters (ArgSpec(args=['self', 'include_
paddle.fluid.dygraph.Conv2DTranspose.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv2DTranspose.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Conv2DTranspose.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.Conv3DTranspose ('paddle.fluid.dygraph.nn.Conv3DTranspose', ('document', 'cd99906d0813609ddea3fb6a2ac900dc'))
+paddle.fluid.dygraph.Conv3DTranspose ('paddle.fluid.dygraph.nn.Conv3DTranspose', ('document', '91ba132bc690eaf76eabdbde8f87e4a0'))
paddle.fluid.dygraph.Conv3DTranspose.__init__ (ArgSpec(args=['self', 'name_scope', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv3DTranspose.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.Conv3DTranspose.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -793,7 +794,7 @@ paddle.fluid.dygraph.Conv3DTranspose.parameters (ArgSpec(args=['self', 'include_
paddle.fluid.dygraph.Conv3DTranspose.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Conv3DTranspose.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.Conv3DTranspose.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.GroupNorm ('paddle.fluid.dygraph.nn.GroupNorm', ('document', '4d65fc6b00970e3b5c5dd0abeacd47cb'))
+paddle.fluid.dygraph.GroupNorm ('paddle.fluid.dygraph.nn.GroupNorm', ('document', '72c125b07bdd1e612607dc77039b2722'))
paddle.fluid.dygraph.GroupNorm.__init__ (ArgSpec(args=['self', 'name_scope', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.GroupNorm.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.GroupNorm.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -809,7 +810,7 @@ paddle.fluid.dygraph.GroupNorm.parameters (ArgSpec(args=['self', 'include_sublay
paddle.fluid.dygraph.GroupNorm.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.GroupNorm.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.GroupNorm.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.SpectralNorm ('paddle.fluid.dygraph.nn.SpectralNorm', ('document', 'f400a29393aa95fff829b4a6111e2952'))
+paddle.fluid.dygraph.SpectralNorm ('paddle.fluid.dygraph.nn.SpectralNorm', ('document', '8f5cfbc431a8b4b44b605cde8b0381ef'))
paddle.fluid.dygraph.SpectralNorm.__init__ (ArgSpec(args=['self', 'name_scope', 'dim', 'power_iters', 'eps', 'name'], varargs=None, keywords=None, defaults=(0, 1, 1e-12, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.SpectralNorm.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.SpectralNorm.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
@@ -825,7 +826,7 @@ paddle.fluid.dygraph.SpectralNorm.parameters (ArgSpec(args=['self', 'include_sub
paddle.fluid.dygraph.SpectralNorm.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.SpectralNorm.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
paddle.fluid.dygraph.SpectralNorm.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.dygraph.TreeConv ('paddle.fluid.dygraph.nn.TreeConv', ('document', '1e3104dea2482f6b79cf7a7ac9a343ab'))
+paddle.fluid.dygraph.TreeConv ('paddle.fluid.dygraph.nn.TreeConv', ('document', '6e175a7bf2a43ae6c0f3a8a54bd69afe'))
paddle.fluid.dygraph.TreeConv.__init__ (ArgSpec(args=['self', 'name_scope', 'output_size', 'num_filters', 'max_depth', 'act', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(1, 2, 'tanh', None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.TreeConv.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
paddle.fluid.dygraph.TreeConv.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
...
@@ -150,20 +150,20 @@ class Layer(core.Layer):
            if p.trainable:
                p.clear_gradient()

-    def _build_once(self, *args):
+    def _build_once(self, *args, **kwargs):
        pass

-    def __call__(self, *inputs):
+    def __call__(self, *inputs, **kwargs):
        if not self._built:
-            self._build_once(*inputs)
+            self._build_once(*inputs, **kwargs)
            if parallel_helper._is_data_parallel_mode():
                parallel_helper._broadcast_parameters(self._parameters.values())

-        outputs = self.forward(*inputs)
+        outputs = self.forward(*inputs, **kwargs)
        self._built = True
        return outputs

-    def forward(self, *inputs):
+    def forward(self, *inputs, **kwargs):
        raise NotImplementedError

    def backward(self, *inputs):
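With `__call__`, `_build_once`, and `forward` all widened above to take `**kwargs`, a custom layer can now accept keyword arguments at call time. A small sketch under those changes (the `ScaledFC` class and its `scale` argument are illustrative, not part of this commit):

```python
import numpy as np
import paddle.fluid as fluid

class ScaledFC(fluid.dygraph.Layer):
    def __init__(self, name_scope, size):
        super(ScaledFC, self).__init__(name_scope)
        self._fc = fluid.dygraph.FC(self.full_name(), size)

    def forward(self, input, scale=1.0):
        # 'scale' arrives through the **kwargs plumbing added above
        return fluid.layers.scale(self._fc(input), scale=scale)

with fluid.dygraph.guard():
    layer = ScaledFC("scaled_fc", 4)
    x = fluid.dygraph.to_variable(np.ones([2, 8], dtype='float32'))
    out = layer(x, scale=2.0)  # kwargs forwarded by Layer.__call__
```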
@@ -216,6 +216,8 @@ class Layer(core.Layer):
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
+        if isinstance(getattr(type(self), name, None), property):
+            object.__setattr__(self, name, value)
        if isinstance(value, framework.Parameter):
            params = self.__dict__.get('_parameters', None)
            if params is None:
@@ -226,6 +228,11 @@ class Layer(core.Layer):
                tensor = var.get_tensor()
                tensor.set(self._loaddict_holder[value.name].numpy(),
                           framework._current_expected_place())
+            if name in params:
+                # remove unused param in tracer
+                if framework._dygraph_tracer_ is not None:
+                    framework._dygraph_tracer_._vars.pop(params[name].name,
+                                                         None)
            params[name] = value
        elif isinstance(value, core.Layer):
            layers = self.__dict__.get('_sub_layers', None)
...
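The `__setattr__` hunks above are what make the new property setters work: when the class defines a `property` for a name, assignment is delegated to the descriptor first, and replacing a registered parameter also evicts the stale variable from the tracer. A hedged illustration of the two update paths this enables (layer names and shapes are illustrative):

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D

with fluid.dygraph.guard():
    conv_a = Conv2D("conv_a", num_filters=2, filter_size=3)
    conv_b = Conv2D("conv_b", num_filters=2, filter_size=3)
    x = fluid.dygraph.to_variable(
        np.random.random([1, 3, 8, 8]).astype('float32'))
    conv_a(x)
    conv_b(x)  # build both layers' parameters

    # property path: rebinds the whole Parameter; __setattr__
    # re-registers it and pops the replaced one from the tracer
    conv_b.weight = conv_a.weight

    # value path: overwrites the existing tensor in place
    conv_b.bias.set_value(np.zeros(conv_b.bias.shape).astype('float32'))
```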
@@ -83,7 +83,7 @@ class Conv2D(layers.Layer):
        H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
        W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1

-    Args:
+    Parameters:
        name_scope(str) : The name for this class.
        num_filters(int): The number of filter. It is as same as the output
            image channel.
@@ -119,6 +119,10 @@ class Conv2D(layers.Layer):
        act (str): Activation type, if it is set to None, activation is not appended.
            Default: None

+    Attributes:
+        weight (Parameter): the learnable weights of filter of this layer.
+        bias (Parameter|None): the learnable bias of this layer.
+
    Raises:
        ValueError: If the shapes of input, filter_size, stride, padding and
            groups mismatch.
@@ -205,6 +209,22 @@ class Conv2D(layers.Layer):
            dtype=self._dtype,
            is_bias=True)

+    @property
+    def weight(self):
+        return self._filter_param
+
+    @weight.setter
+    def weight(self, value):
+        self._filter_param = value
+
+    @property
+    def bias(self):
+        return self._bias_param
+
+    @bias.setter
+    def bias(self, value):
+        self._bias_param = value
+
    def forward(self, input):
        pre_bias = self._helper.create_variable_for_type_inference(
            dtype=self._dtype)
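Since `weight` and `bias` are ordinary properties returning the underlying `Parameter`, they can be inspected like any other variable; per the "new param shape check" item in the commit message, `set_value` should reject arrays whose shape does not match the parameter's (a sketch; the exact exception type is not shown in this diff):

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D

with fluid.dygraph.guard():
    conv = Conv2D("conv2d", num_filters=2, filter_size=3)
    conv(fluid.dygraph.to_variable(
        np.random.random([1, 3, 8, 8]).astype('float32')))

    w = conv.weight
    print(w.name, w.shape)  # e.g. [2, 3, 3, 3] for a 3-channel input
    try:
        w.set_value(np.zeros([1, 1]).astype('float32'))  # wrong shape
    except Exception as e:  # shape check added in this commit
        print("rejected:", e)
```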
@@ -288,7 +308,7 @@ class Conv3D(layers.Layer):
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
-Args:
+Parameters:
name_scope(str) : The name for this class.
num_filters(int): The number of filters. It is the same as the output image channel.
filter_size (int|tuple|None): The filter size. If filter_size is a tuple,
@@ -323,6 +343,10 @@ class Conv3D(layers.Layer):
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
Attributes:
weight (Parameter): the learnable weights of filters of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: The tensor variable storing the convolution and \
non-linearity activation result.
@@ -405,6 +429,22 @@ class Conv3D(layers.Layer):
dtype=self._dtype,
is_bias=True)
@property
def weight(self):
return self._filter_param
@weight.setter
def weight(self, value):
self._filter_param = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
@@ -425,15 +465,17 @@ class Conv3D(layers.Layer):
'use_mkldnn': False
})
-pre_act = self._helper.create_variable_for_type_inference(
-    dtype=self._dtype)
+if self._bias_param is not None:
+    pre_act = self._helper.create_variable_for_type_inference(
+        dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self._bias_param]},
outputs={'Out': [pre_act]},
attrs={'axis': 1})
else:
pre_act = pre_bias
return self._helper.append_activation(pre_act, act=self._act)
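This is the Conv3D fix mentioned in the commit message: the elementwise_add is only appended when a bias parameter exists, so a layer built without bias takes the `pre_act = pre_bias` branch instead of feeding a missing input to the op. A short sketch of the case the fix covers (assuming `bias_attr=False` makes create_parameter return None, which `bias` then exposes):

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph import nn
    from paddle.fluid.dygraph.base import to_variable

    with fluid.dygraph.guard():
        volumes = to_variable(np.ones([2, 3, 6, 6, 6], dtype='float32'))
        conv = nn.Conv3D('conv3d_nobias', num_filters=3, filter_size=2,
                         bias_attr=False)
        out = conv(volumes)  # no bias add is appended in forward
        assert conv.bias is None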
@@ -489,7 +531,7 @@ class Conv3DTranspose(layers.Layer):
H_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
W_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1
-Args:
+Parameters:
name_scope(str) : The name for this class.
num_filters(int): The number of filters. It is the same as the output
image channel.
@@ -531,6 +573,10 @@ class Conv3DTranspose(layers.Layer):
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Attributes:
weight (Parameter): the learnable weights of filters of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: The tensor variable storing the convolution transpose result.
@@ -627,6 +673,22 @@ class Conv3DTranspose(layers.Layer):
dtype=self._dtype,
is_bias=True)
@property
def weight(self):
return self._img_filter
@weight.setter
def weight(self, value):
self._img_filter = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
@@ -667,7 +729,7 @@ class Pool2D(layers.Layer):
Parameters(ksize, strides, paddings) are two elements. These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.
-Args:
+Parameters:
name_scope(str) : The name of this class.
pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
it must contain two integers, (pool_size_Height, pool_size_Width).
@@ -824,7 +886,7 @@ class FC(layers.Layer):
out.data = [[0.18669507, 0.1893476]]
out.shape = (1, 2)
-Args:
+Parameters:
name_scope(str): The name of this class.
size(int): The number of output units in this layer.
num_flatten_dims (int): The fc layer can accept an input tensor with more than
@@ -845,6 +907,10 @@ class FC(layers.Layer):
is_test(bool): A flag indicating whether execution is in test phase. Default: False
dtype(str): Dtype used for weight
Attributes:
weight (list of Parameter): the learnable weights of this layer.
bias (Parameter|None): the learnable bias of this layer.
Raises:
ValueError: If rank of the input tensor is less than 2.
@@ -883,15 +949,6 @@ class FC(layers.Layer):
self._act = act
self.__w = list()
-@property
-def _w(self, i=0):
-    return self.__w[i]
-@_w.setter
-def _w(self, value, i=0):
-    assert isinstance(value, Parameter)
-    self.__w[i] = value
def _build_once(self, input):
i = 0
for inp, param in self._helper.iter_inputs_and_params(input,
@@ -916,6 +973,36 @@ class FC(layers.Layer):
self._b = self.create_parameter(
attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True)
# TODO(songyouwei): We should remove _w property
@property
def _w(self, i=0):
return self.__w[i]
@_w.setter
def _w(self, value, i=0):
assert isinstance(self.__w[i], Variable)
self.__w[i].set_value(value)
@property
def weight(self):
if len(self.__w) > 1:
return self.__w
else:
return self.__w[0]
@weight.setter
def weight(self, value):
if len(self.__w) == 1:
self.__w[0] = value
@property
def bias(self):
return self._b
@bias.setter
def bias(self, value):
self._b = value
def forward(self, input):
mul_results = list()
i = 0
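Since FC can consume several input tensors and keeps one weight per input, `weight` returns the whole list when more than one weight exists and unwraps it to a single Parameter otherwise; the setter likewise only rebinds the single-weight case. A minimal single-input sketch (names and shapes are illustrative):

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph.base import to_variable

    with fluid.dygraph.guard():
        fc = fluid.dygraph.FC('fc', size=4, num_flatten_dims=1)
        fc(to_variable(np.ones([3, 3], dtype='float32')))  # builds weights
        w = fc.weight  # single input, so a Parameter of shape [3, 4]
        # In-place update keeps the Parameter object and its name intact.
        fc.weight.set_value(np.zeros([3, 4], dtype='float32'))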
@@ -1000,7 +1087,7 @@ class BatchNorm(layers.Layer):
\\sigma_{\\beta}^{2} + \\epsilon}} \\\\
y_i &\\gets \\gamma \\hat{x_i} + \\beta
-Args:
+Parameters:
name_scope(str): The name of this class.
act(str|None): Activation type, linear|relu|prelu|...
is_test (bool): A flag indicating whether it is in
@@ -1184,7 +1271,7 @@ class Embedding(layers.Layer):
:attr:`input`.
All the input variables are passed in as local variables to the LayerHelper constructor
-Args:
+Parameters:
name_scope(str): The name of this class.
size(tuple|list): The shape of the look up table parameter. It should have two elements which indicate the size
of the dictionary of embeddings and the size of each embedding vector respectively.
@@ -1196,6 +1283,9 @@ class Embedding(layers.Layer):
param_attr(ParamAttr): Parameters for this layer. Default: None.
dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc. Default: 'float32'.
Attributes:
weight (Parameter): the learnable weights of this layer.
Returns:
Variable: The tensor variable storing the embeddings of the \
supplied inputs.
@@ -1246,6 +1336,14 @@ class Embedding(layers.Layer):
dtype=self._dtype,
is_bias=False)
@property
def weight(self):
return self._w
@weight.setter
def weight(self, value):
self._w = value
def forward(self, input):
out = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
@@ -1291,7 +1389,7 @@ class LayerNorm(layers.Layer):
* :math:`b`: the trainable bias parameter.
-Args:
+Parameters:
name_scope(str): The name of this class.
scale(bool): Whether to learn the adaptive gain :math:`g` after
normalization. Default: True.
@@ -1451,7 +1549,7 @@ class GRUUnit(layers.Layer):
This layer has three outputs :math:`h_t`, :math:`dot(r_t, h_{t-1})`
and concatenation of :math:`u_t`, :math:`r_t` and :math:`m_t`.
-Args:
+Parameters:
name_scope(str): The name of this class.
size (int): The input dimension value.
param_attr(ParamAttr|None): The parameter attribute for the learnable
@@ -1481,6 +1579,10 @@ class GRUUnit(layers.Layer):
Default: 'sigmoid'
dtype(str): The dtype of the layers. Default: 'float32'
Attributes:
weight (Parameter): the learnable weights of this layer.
bias (Parameter): the learnable bias of this layer.
Returns:
tuple: The hidden value, reset-hidden value and gate values.
@@ -1515,6 +1617,7 @@ class GRUUnit(layers.Layer):
origin_mode=False,
dtype='float32'):
super(GRUUnit, self).__init__(name_scope, dtype)
self._bias_attr = bias_attr
activation_dict = dict(
identity=0,
@@ -1532,9 +1635,26 @@ class GRUUnit(layers.Layer):
# create bias
bias_size = [1, 3 * size]
self._bias_size = bias_size
self._bias = self.create_parameter(
attr=bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
@property
def weight(self):
return self._weight
@weight.setter
def weight(self, value):
self._weight = value
@property
def bias(self):
return self._bias
@bias.setter
def bias(self, value):
self._bias = value
def forward(self, input, hidden):
inputs = {'Input': input, 'HiddenPrev': hidden, 'Weight': self._weight}
if self._bias:
@@ -1567,7 +1687,7 @@ class NCE(layers.Layer):
`Noise-contrastive estimation: A new estimation principle for unnormalized statistical models <http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf>`_ .
By default this operator uses a uniform distribution for sampling.
-Args:
+Parameters:
name_scope(str): The name of this class.
num_total_classes (int): Total number of classes in all samples
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
@@ -1590,6 +1710,10 @@ class NCE(layers.Layer):
seed (int): The seed used in sampler. Default: 0.
is_sparse(bool): The flag indicating whether to use sparse update, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.
Attributes:
weight (Parameter): the learnable weights of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: The output nce loss.
@@ -1763,6 +1887,22 @@ class NCE(layers.Layer):
self._inputs['Bias'] = self._b
self._inputs['Weight'] = self._w
@property
def weight(self):
return self._w
@weight.setter
def weight(self, value):
self._w = value
@property
def bias(self):
return self._b
@bias.setter
def bias(self, value):
self._b = value
def forward(self, input, label, sample_weight=None):
assert isinstance(input, Variable)
assert isinstance(label, Variable)
@@ -1797,7 +1937,7 @@ class PRelu(layers.Layer):
.. math::
y = \max(0, x) + \\alpha * \min(0, x)
-Args:
+Parameters:
name_scope(str): The name of this class.
mode (str): The mode for weight sharing. It supports all, channel
and element. all: all elements share same weight
@@ -1806,6 +1946,9 @@ class PRelu(layers.Layer):
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight (alpha).
Attributes:
weight (Parameter): the learnable weights of this layer.
Returns:
Variable: The output tensor with the same shape as input.
@@ -1849,6 +1992,14 @@ class PRelu(layers.Layer):
is_bias=False,
default_initializer=Constant(1.0))
@property
def weight(self):
return self._alpha
@weight.setter
def weight(self, value):
self._alpha = value
def forward(self, input):
out = self._helper.create_variable_for_type_inference(self._dtype)
@@ -1878,7 +2029,7 @@ class BilinearTensorProduct(layers.Layer):
- :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
- :math:`y^\mathrm{T}`: the transpose of :math:`y_{2}`.
-Args:
+Parameters:
name_scope(str): The name of this class.
size (int): The dimension of this layer.
act (str): Activation to be applied to the output of this layer. Default: None.
@@ -1889,6 +2040,10 @@ class BilinearTensorProduct(layers.Layer):
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
Attributes:
weight (Parameter): the learnable weights of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: A 2-D Tensor of shape [batch_size, size].
@@ -1940,6 +2095,22 @@ class BilinearTensorProduct(layers.Layer):
dtype=self._dtype,
is_bias=True)
@property
def weight(self):
return self._w
@weight.setter
def weight(self, value):
self._w = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, x, y):
self._inputs = {"X": x, "Y": y, "Weight": self._w}
if self._bias_param:
@@ -2013,7 +2184,7 @@ class Conv2DTranspose(layers.Layer):
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\\\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] )
-Args:
+Parameters:
name_scope(str): The name of this class.
num_filters(int): The number of filters. It is the same as the output
image channel.
@@ -2055,6 +2226,10 @@ class Conv2DTranspose(layers.Layer):
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
Attributes:
weight (Parameter): the learnable weights of filters of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: The tensor variable storing the convolution transpose result.
@@ -2163,6 +2338,22 @@ class Conv2DTranspose(layers.Layer):
dtype=self._dtype,
is_bias=True)
@property
def weight(self):
return self._img_filter
@weight.setter
def weight(self, value):
self._img_filter = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, input):
pre_bias = self._helper.create_variable_for_type_inference(
dtype=input.dtype)
@@ -2202,7 +2393,7 @@ class SequenceConv(layers.Layer):
other convolutional configurations for the filters and stride as given
in the input parameters to the function.
-Args:
+Parameters:
name_scope(str): The name of this class.
num_filters (int): number of filters.
filter_size (int): the filter size (H and W). Default: 3.
@@ -2220,6 +2411,10 @@ class SequenceConv(layers.Layer):
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
Attributes:
weight (Parameter): the learnable weights of filters of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
Variable: output of sequence_conv
"""
@@ -2305,7 +2500,7 @@ class RowConv(layers.Layer):
More details about row_conv please refer to the design document https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .
-Args:
+Parameters:
name_scope(str): The name of this class.
future_context_size (int): Future context size. Please note, the shape
of convolution kernel is [future_context_size + 1, D].
@@ -2313,6 +2508,9 @@ class RowConv(layers.Layer):
name, initializer etc. Default: None.
act (str): Non-linear activation to be applied to output variable. Default: None.
Attributes:
weight (Parameter): the learnable weights of this layer.
Returns:
the output(Out) is a LodTensor, which supports variable time-length input sequences.
The underlying tensor in this LodTensor is a matrix with shape T x N, i.e., the same shape as X.
@@ -2368,7 +2566,7 @@ class GroupNorm(layers.Layer):
Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
-Args:
+Parameters:
name_scope(str): The name of this class.
groups(int): The number of groups into which the channels are divided.
epsilon(float): The small value added to the variance to prevent
@@ -2496,7 +2694,7 @@ class SpectralNorm(layers.Layer):
Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .
-Args:
+Parameters:
name_scope(str): The name of this class.
dim(int): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.
power_iters(int): The number of power iterations to calculate spectral norm. Default: 1.
@@ -2573,7 +2771,7 @@ class TreeConv(layers.Layer):
The paper of Tree-Based Convolution Operator is here: https://arxiv.org/abs/1409.5718v1
-Args:
+Parameters:
name_scope(str): The name of this class.
output_size(int): output feature width
num_filters(int): number of filters, Default: 1.
@@ -2583,6 +2781,10 @@ class TreeConv(layers.Layer):
bias_attr(ParamAttr): the parameter attribute for the bias of this layer, Default: None.
name(str): a name of this layer(optional). If set None, the layer will be named automatically, Default: None.
Attributes:
weight (Parameter): the learnable weights of filters of this layer.
bias (Parameter|None): the learnable bias of this layer.
Returns:
out(Variable): (Tensor) The feature vector of subtrees. The shape of the output tensor is [max_tree_node_size, output_size, num_filters]. The output tensor could be a new feature vector for next tree convolution layers
@@ -2639,6 +2841,22 @@ class TreeConv(layers.Layer):
dtype=self._dtype,
is_bias=False)
@property
def weight(self):
return self.W
@weight.setter
def weight(self, value):
self.W = value
@property
def bias(self):
return self._bias_param
@bias.setter
def bias(self, value):
self._bias_param = value
def forward(self, nodes_vector, edge_set):
if self._name:
...
@@ -638,6 +638,45 @@ class Variable(object):
new_ivar = self._ivar._copy_to(core.CPUPlace(), True)
return np.array(new_ivar.value().get_tensor())
@dygraph_only
def set_value(self, value):
"""
Set a new value for this Variable.
Args:
value (Variable|np.ndarray): the new value.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import FC
import numpy as np
data = np.ones([3, 32, 32], dtype='float32')
with fluid.dygraph.guard():
fc = fluid.dygraph.FC("fc", 4)
t = to_variable(data)
fc(t) # call with default weight
custom_weight = np.random.randn(1024, 4).astype("float32")
fc.weight.set_value(custom_weight) # change existing weight
out = fc(t) # call with different weight
"""
assert isinstance(value, (Variable, np.ndarray))
if list(value.shape) != list(self.shape):
raise ValueError(
"The shape of the new value must be the same as that of the original Variable."
)
self_tensor = self._ivar.value().get_tensor()
if isinstance(value, Variable):
value = value._ivar.value().get_tensor().__array__()
self_tensor.set(value, _current_expected_place())
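Note that the source may be either a numpy array or another dygraph Variable (the tests below pass one layer's bias Parameter directly), and a shape mismatch fails fast with ValueError rather than corrupting the tensor. A short sketch (layer and shapes are illustrative):

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph.base import to_variable

    with fluid.dygraph.guard():
        fc = fluid.dygraph.FC('fc', 4, num_flatten_dims=1)
        fc(to_variable(np.ones([3, 3], dtype='float32')))
        # A Variable works as the source; its underlying tensor is copied.
        fc.weight.set_value(to_variable(np.zeros([3, 4], dtype='float32')))
        try:
            fc.weight.set_value(np.zeros([4, 4], dtype='float32'))
        except ValueError:
            pass  # the new value must match the existing shape exactly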
@dygraph_only
def backward(self, backward_strategy=None):
"""
@@ -1042,7 +1081,7 @@ class Variable(object):
if self.shape[axis] < 0:
return self._cloneVar(True)
index = int(item)
-if (index > 0 and index >= self.shape[axis])\
+if (index > 0 and index >= self.shape[axis]) \
or (index < 0 and (index + self.shape[axis]) < 0):
raise IndexError("invalid index")
return self._sliceVar([axis], [index], [index + 1])
@@ -2662,10 +2701,10 @@ class IrOpNode(IrNode):
if isinstance(val, Block):
desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and \
all(isinstance(v, Block) for v in val):
desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
desc.set_serialized_attr(name, val.serialize_to_string())
else:
desc._set_attr(name, val)
@@ -2888,8 +2927,8 @@ class IrGraph(object):
op_node(IrOpNode): the operator node that is needed to update input's link.
"""
assert old_input_node.node in self.graph.nodes() and new_input_node.node in \
self.graph.nodes() and op_node.node in self.graph.nodes(), \
'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.'
old_input_node.remove_output(op_node)
op_node.remove_input(old_input_node)
new_input_node.append_output(op_node)
@@ -3024,7 +3063,7 @@ class IrGraph(object):
def _convert_to_pdf(dot_file_path):
pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf'
exited_code = subprocess.call('dot -Tpdf ' + dot_file_path \
+ ' -o ' + pdf_save_path, shell=True)
if exited_code != 0:
print('The dot command is needed for creating pdf files.')
print('The {} is saved as the dot filetype.'.format(
...
@@ -82,6 +82,34 @@ class LayerTest(unittest.TestCase):
class TestLayer(LayerTest):
def test_custom_layer_with_kwargs(self):
class CustomLayer(fluid.Layer):
def __init__(self, name_scope, fc1_size=4):
super(CustomLayer, self).__init__(name_scope)
self.fc1 = nn.FC('fc1',
size=fc1_size,
bias_attr=False,
num_flatten_dims=1)
self.fc2 = nn.FC('fc2',
size=1,
bias_attr=False,
num_flatten_dims=1)
def forward(self, x, do_fc2=False):
ret = self.fc1(x)
if do_fc2:
ret = self.fc2(ret)
return ret
with self.dynamic_graph():
inp = np.ones([3, 3], dtype='float32')
x = base.to_variable(inp)
custom = CustomLayer('custom', fc1_size=2)
ret = custom(x, do_fc2=False)
self.assertTrue(np.array_equal(ret.numpy().shape, [3, 2]))
ret = custom(x, do_fc2=True)
self.assertTrue(np.array_equal(ret.numpy().shape, [3, 1]))
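The test above leans on the new keyword-argument support in Layer: extra keyword arguments supplied at call time are forwarded to forward(). A minimal sketch of that calling contract only (this is not the actual Layer.__call__, which also runs parameter building and hooks):

    class KwargsForwarding(object):
        def __call__(self, *inputs, **kwargs):
            # **kwargs (e.g. do_fc2=True) reach forward() unchanged.
            return self.forward(*inputs, **kwargs)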
def test_fc(self):
inp = np.ones([3, 32, 32], dtype='float32')
with self.static_graph():
@@ -117,6 +145,62 @@ class TestLayer(LayerTest):
self.assertTrue(np.array_equal(static_ret, static_ret2))
self.assertTrue(np.array_equal(static_ret, dy_ret_value))
with self.dynamic_graph():
custom_weight = np.random.randn(1024, 4).astype("float32")
weight_attr1 = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
fc1 = fluid.dygraph.FC("fc1",
4,
num_flatten_dims=1,
param_attr=weight_attr1)
out1 = fc1(base.to_variable(inp))
loss1 = fluid.layers.reduce_mean(out1)
fc1_weight_init = fc1.weight.detach()
fc1_bias_init = fc1.bias.detach()
loss1.backward()
optimizer1 = fluid.optimizer.SGD(learning_rate=0.1)
optimizer1.minimize(loss1)
fc1_weight_updated = fc1.weight.detach()
with self.dynamic_graph():
weight_attr2 = fluid.ParamAttr(
initializer=fluid.initializer.Uniform())
fc2 = fluid.dygraph.FC("fc2",
4,
num_flatten_dims=1,
param_attr=weight_attr2)
out2 = fc2(base.to_variable(inp))
self.assertFalse(
np.array_equal(fc1_weight_init.numpy(), fc2.weight.numpy()))
self.assertFalse(np.array_equal(out1.numpy(), out2.numpy()))
mismatched_weight = np.random.randn(4, 4).astype("float32")
with self.assertRaises(ValueError):
fc2.weight.set_value(mismatched_weight)
fc2.weight.set_value(fc1_weight_init)
fc2.bias.set_value(fc1_bias_init)
out2 = fc2(base.to_variable(inp))
loss2 = fluid.layers.reduce_mean(out2)
loss2.backward()
optimizer2 = fluid.optimizer.SGD(learning_rate=0.1)
optimizer2.minimize(loss2)
self.assertTrue(
np.array_equal(fc2.weight.numpy(), fc1_weight_updated.numpy()))
self.assertTrue(np.array_equal(out1.numpy(), out2.numpy()))
fc2.weight = fc1.weight
fc2.bias = fc1.bias
self.assertTrue(
np.array_equal(fc2.weight.numpy(), fc1.weight.numpy()))
self.assertTrue(np.array_equal(fc2.bias.numpy(), fc1.bias.numpy()))
def test_layer_norm(self):
inp = np.ones([3, 32, 32], dtype='float32')
with self.static_graph():
@@ -238,6 +322,41 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, static_ret2))
with self.dynamic_graph():
images = np.ones([2, 3, 5, 5], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
conv2d1 = nn.Conv2D('conv2d1', num_filters=3, filter_size=[2, 2])
conv2d2 = nn.Conv2D(
'conv2d2',
num_filters=3,
filter_size=[2, 2],
param_attr=weight_attr)
dy_ret1 = conv2d1(base.to_variable(images))
dy_ret2 = conv2d2(base.to_variable(images))
self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
conv2d1_weight_np = conv2d1.weight.numpy()
conv2d1_bias = conv2d1.bias
self.assertFalse(
np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()))
conv2d2.weight.set_value(conv2d1_weight_np)
self.assertTrue(
np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()))
conv2d2.bias.set_value(conv2d1_bias)
dy_ret1 = conv2d1(base.to_variable(images))
dy_ret2 = conv2d2(base.to_variable(images))
self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
conv2d2.weight = conv2d1.weight
conv2d2.bias = conv2d1.bias
self.assertTrue(
np.array_equal(conv2d1.weight.numpy(), conv2d2.weight.numpy()))
self.assertTrue(
np.array_equal(conv2d1.bias.numpy(), conv2d2.bias.numpy()))
def test_gru_unit(self):
lod = [[2, 4, 3]]
D = 5
@@ -282,6 +401,37 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_ret[i], static_ret2[i]))
self.assertTrue(np.allclose(static_ret[i], dy_ret_value[i]))
with self.dynamic_graph():
custom_weight = np.random.randn(D, D * 3).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
gru1 = nn.GRUUnit('gru1', size=D * 3)
gru2 = nn.GRUUnit('gru2', size=D * 3, param_attr=weight_attr)
dy_ret1 = gru1(
base.to_variable(input), base.to_variable(hidden_input))
dy_ret2 = gru2(
base.to_variable(input), base.to_variable(hidden_input))
self.assertFalse(
np.array_equal(gru1.weight.numpy(), gru2.weight.numpy()))
for o1, o2 in zip(dy_ret1, dy_ret2):
self.assertFalse(np.array_equal(o1.numpy(), o2.numpy()))
gru2.weight.set_value(gru1.weight.numpy())
gru2.bias.set_value(gru1.bias)
dy_ret1 = gru1(
base.to_variable(input), base.to_variable(hidden_input))
dy_ret2 = gru2(
base.to_variable(input), base.to_variable(hidden_input))
for o1, o2 in zip(dy_ret1, dy_ret2):
self.assertTrue(np.array_equal(o1.numpy(), o2.numpy()))
gru2.weight = gru1.weight
gru2.bias = gru1.bias
self.assertTrue(
np.array_equal(gru1.weight.numpy(), gru2.weight.numpy()))
self.assertTrue(
np.array_equal(gru1.bias.numpy(), gru2.bias.numpy()))
def test_elementwise_math(self):
n = np.ones([3, 3], dtype='float32')
n2 = np.ones([3, 3], dtype='float32') * 1.1
@@ -417,6 +567,42 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_rlt2, static_rlt))
self.assertTrue(np.allclose(dy_rlt_value, static_rlt2))
with self.dynamic_graph():
images = np.ones([2, 3, 5, 5], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
conv2d1 = nn.Conv2DTranspose(
'conv2d1', num_filters=3, filter_size=[2, 2])
conv2d2 = nn.Conv2DTranspose(
'conv2d2',
num_filters=3,
filter_size=[2, 2],
param_attr=weight_attr)
dy_ret1 = conv2d1(base.to_variable(images))
dy_ret2 = conv2d2(base.to_variable(images))
self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
conv2d1_weight_np = conv2d1.weight.numpy()
conv2d1_bias = conv2d1.bias
self.assertFalse(
np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()))
conv2d2.weight.set_value(conv2d1_weight_np)
self.assertTrue(
np.array_equal(conv2d1_weight_np, conv2d2.weight.numpy()))
conv2d2.bias.set_value(conv2d1_bias)
dy_ret1 = conv2d1(base.to_variable(images))
dy_ret2 = conv2d2(base.to_variable(images))
self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
conv2d2.weight = conv2d1.weight
conv2d2.bias = conv2d1.bias
self.assertTrue(
np.array_equal(conv2d1.weight.numpy(), conv2d2.weight.numpy()))
self.assertTrue(
np.array_equal(conv2d1.bias.numpy(), conv2d2.bias.numpy()))
def test_bilinear_tensor_product(self):
inp_np_x = np.array([[1, 2, 3]]).astype('float32')
inp_np_y = np.array([[4, 5, 6]]).astype('float32')
@@ -498,9 +684,36 @@ class TestLayer(LayerTest):
self.assertTrue(np.array_equal(static_rlt2, static_rlt))
self.assertTrue(np.array_equal(dy_rlt_value, static_rlt))
with self.dynamic_graph():
custom_weight = np.random.randn(6, 3, 3).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
btp1 = nn.BilinearTensorProduct('btp1', 6, act='sigmoid')
btp2 = nn.BilinearTensorProduct(
'btp2', 6, act='sigmoid', param_attr=weight_attr)
dy_rlt1 = btp1(
base.to_variable(inp_np_x), base.to_variable(inp_np_y))
dy_rlt2 = btp2(
base.to_variable(inp_np_x), base.to_variable(inp_np_y))
self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
btp2.weight.set_value(btp1.weight.numpy())
btp2.bias.set_value(btp1.bias)
dy_rlt1 = btp1(
base.to_variable(inp_np_x), base.to_variable(inp_np_y))
dy_rlt2 = btp2(
base.to_variable(inp_np_x), base.to_variable(inp_np_y))
self.assertTrue(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
btp2.weight = btp1.weight
btp2.bias = btp1.bias
self.assertTrue(
np.array_equal(btp1.weight.numpy(), btp2.weight.numpy()))
self.assertTrue(
np.array_equal(btp1.bias.numpy(), btp2.bias.numpy()))
def test_prelu(self):
inp_np = np.ones([5, 200, 100, 100]).astype('float32')
with self.static_graph():
data_t = layers.data(
name="input",
@@ -540,6 +753,32 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_rlt2, static_rlt))
self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
with self.dynamic_graph():
inp_np = np.random.randn(5, 200, 100, 100).astype("float32")
inp = base.to_variable(inp_np)
mode = 'channel'
prelu1 = nn.PRelu(
'prelu1',
mode=mode,
param_attr=ParamAttr(initializer=Constant(2.0)))
prelu2 = nn.PRelu(
'prelu2',
mode=mode,
param_attr=ParamAttr(initializer=Constant(1.0)))
dy_rlt1 = prelu1(inp)
dy_rlt2 = prelu2(inp)
self.assertFalse(
np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy()))
self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
prelu2.weight.set_value(prelu1.weight.numpy())
dy_rlt1 = prelu1(inp)
dy_rlt2 = prelu2(inp)
self.assertTrue(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
prelu2.weight = prelu1.weight
self.assertTrue(
np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy()))
def test_embeding(self):
inp_word = np.array([[[1]]]).astype('int64')
dict_size = 20
@@ -574,6 +813,31 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_rlt2, static_rlt))
self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
with self.dynamic_graph():
custom_weight = np.random.randn(dict_size, 32).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
emb1 = nn.Embedding(
name_scope='embedding', size=[dict_size, 32], is_sparse=False)
emb2 = nn.Embedding(
name_scope='embedding',
size=[dict_size, 32],
param_attr=weight_attr,
is_sparse=False)
rep1 = emb1(base.to_variable(inp_word))
rep2 = emb2(base.to_variable(inp_word))
self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight))
self.assertTrue(np.array_equal(emb2.weight.numpy(), custom_weight))
self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy()))
emb2.weight.set_value(emb1.weight.numpy())
rep2 = emb2(base.to_variable(inp_word))
self.assertTrue(np.array_equal(rep1.numpy(), rep2.numpy()))
emb2.weight = emb1.weight
self.assertTrue(
np.array_equal(emb1.weight.numpy(), emb2.weight.numpy()))
def test_nce(self):
window_size = 5
dict_size = 20
@@ -695,6 +959,69 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_rlt2, static_rlt))
self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
with self.dynamic_graph(force_to_use_cpu=True):
custom_weight = np.random.randn(dict_size, 128).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
words = []
for i in range(window_size):
words.append(base.to_variable(inp_word[i]))
sample_weights = layers.fill_constant(
shape=[5, 1], dtype='float32', value=1)
emb = nn.Embedding(
'embedding',
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=False)
embs3 = []
for i in range(window_size):
if i == label_word:
continue
emb_rlt = emb(words[i])
embs3.append(emb_rlt)
embs3 = layers.concat(input=embs3, axis=1)
nce1 = nn.NCE('nce1',
num_total_classes=dict_size,
num_neg_samples=2,
sampler="custom_dist",
custom_dist=nid_freq_arr.tolist(),
seed=seed,
param_attr='nce1.w',
bias_attr='nce1.b',
sample_weight=sample_weights)
nce2 = nn.NCE('nce2',
param_attr=weight_attr,
num_total_classes=dict_size,
num_neg_samples=2,
sampler="custom_dist",
custom_dist=nid_freq_arr.tolist(),
seed=seed,
bias_attr='nce2.b',
sample_weight=sample_weights)
nce1_loss = nce1(embs3, words[label_word])
nce2_loss = nce2(embs3, words[label_word])
self.assertFalse(
np.array_equal(nce1_loss.numpy(), nce2_loss.numpy()))
nce2.weight.set_value(nce1.weight.numpy())
nce2.bias.set_value(nce1.bias)
nce1_loss = nce1(embs3, words[label_word])
nce2_loss = nce2(embs3, words[label_word])
self.assertTrue(
np.array_equal(nce1_loss.numpy(), nce2_loss.numpy()))
nce2.weight = nce1.weight
nce2.bias = nce1.bias
self.assertTrue(
np.array_equal(nce1.weight.numpy(), nce2.weight.numpy()))
self.assertTrue(
np.array_equal(nce1.bias.numpy(), nce2.bias.numpy()))
def test_conv3d(self):
with self.static_graph():
images = layers.data(
@@ -724,6 +1051,38 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_ret, dy_rlt_value))
self.assertTrue(np.allclose(static_ret, static_ret2))
with self.dynamic_graph():
images = np.ones([2, 3, 6, 6, 6], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
conv3d1 = nn.Conv3D('conv3d1', num_filters=3, filter_size=2)
conv3d2 = nn.Conv3D(
'conv3d2', num_filters=3, filter_size=2, param_attr=weight_attr)
dy_ret1 = conv3d1(base.to_variable(images))
dy_ret2 = conv3d2(base.to_variable(images))
self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
conv3d1_weight_np = conv3d1.weight.numpy()
conv3d1_bias = conv3d1.bias
self.assertFalse(
np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()))
conv3d2.weight.set_value(conv3d1_weight_np)
self.assertTrue(
np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()))
conv3d2.bias.set_value(conv3d1_bias)
dy_ret1 = conv3d1(base.to_variable(images))
dy_ret2 = conv3d2(base.to_variable(images))
self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
conv3d2.weight = conv3d1.weight
conv3d2.bias = conv3d1.bias
self.assertTrue(
np.array_equal(conv3d1.weight.numpy(), conv3d2.weight.numpy()))
self.assertTrue(
np.array_equal(conv3d1.bias.numpy(), conv3d2.bias.numpy()))
def test_row_conv(self):
input = np.arange(15).reshape([3, 5]).astype('float32')
if core.is_compiled_with_cuda():
@@ -943,6 +1302,45 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_ret, static_ret2))
self.assertTrue(np.allclose(static_ret, dy_rlt_value))
with self.dynamic_graph():
custom_weight = np.random.randn(5, 3, 6, 1).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
treeConv1 = nn.TreeConv(
'SpectralNorm1',
output_size=6,
num_filters=1,
max_depth=2,
bias_attr='tc1_b')
treeConv2 = nn.TreeConv(
'SpectralNorm2',
output_size=6,
num_filters=1,
max_depth=2,
param_attr=weight_attr,
bias_attr='tc2_b')
dy_ret1 = treeConv1(
base.to_variable(vectors), base.to_variable(adj))
dy_ret2 = treeConv2(
base.to_variable(vectors), base.to_variable(adj))
self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
treeConv2.weight.set_value(treeConv1.weight.numpy())
treeConv2.bias.set_value(treeConv1.bias)
dy_ret1 = treeConv1(
base.to_variable(vectors), base.to_variable(adj))
dy_ret2 = treeConv2(
base.to_variable(vectors), base.to_variable(adj))
self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
treeConv2.weight = treeConv1.weight
treeConv2.bias = treeConv1.bias
self.assertTrue(
np.array_equal(treeConv1.weight.numpy(),
treeConv2.weight.numpy()))
self.assertTrue(
np.array_equal(treeConv1.bias.numpy(), treeConv2.bias.numpy()))
def test_conv3d_transpose(self):
input_array = np.arange(0, 48).reshape(
[2, 3, 2, 2, 2]).astype('float32')
@@ -974,6 +1372,48 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_rlt2, static_rlt))
self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
with self.dynamic_graph():
images = np.ones([2, 3, 6, 6, 6], dtype='float32')
custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
custom_weight))
conv3d1 = nn.Conv3DTranspose(
'conv3d1',
num_filters=3,
filter_size=2,
bias_attr='conv3d1_b',
use_cudnn=False)
conv3d2 = nn.Conv3DTranspose(
'conv3d2',
num_filters=3,
filter_size=2,
param_attr=weight_attr,
bias_attr='conv3d2_b',
use_cudnn=False)
dy_ret1 = conv3d1(base.to_variable(images))
dy_ret2 = conv3d2(base.to_variable(images))
self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
conv3d1_weight_np = conv3d1.weight.numpy()
conv3d1_bias = conv3d1.bias
self.assertFalse(
np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()))
conv3d2.weight.set_value(conv3d1_weight_np)
self.assertTrue(
np.array_equal(conv3d1_weight_np, conv3d2.weight.numpy()))
conv3d2.bias.set_value(conv3d1_bias)
dy_ret1 = conv3d1(base.to_variable(images))
dy_ret2 = conv3d2(base.to_variable(images))
self.assertTrue(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy()))
conv3d2.weight = conv3d1.weight
conv3d2.bias = conv3d1.bias
self.assertTrue(
np.array_equal(conv3d1.weight.numpy(), conv3d2.weight.numpy()))
self.assertTrue(
np.array_equal(conv3d1.bias.numpy(), conv3d2.bias.numpy()))
def test_eye_op(self):
np_eye = np.eye(3, 2)
array_rlt1 = [np_eye for _ in range(3)]
...