Unverified commit a9d66025, authored by Jiabin Yang, committed by GitHub

Fix ci problem2 (#41263)

* support test_create_parameter

* support fused_transformer_encoder_layer

* skip program_desc tracer related tests in eager mode

* fix ci tests on eager
Parent 6b5cff54
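Most of the test changes below follow a single wrapper pattern: the original test body is renamed from `test_*` to `func_*`, and a new `test_*` method runs it twice, once under `_test_eager_guard()` (the new eager dygraph mode) and once in the default mode. A minimal sketch of the pattern, with `MyCase`/`func_case` as hypothetical names:

import unittest

import paddle.fluid as fluid


class MyCase(unittest.TestCase):
    # Hypothetical test illustrating the wrapper pattern used in this commit.
    def func_case(self):
        # the original test body, renamed from test_case to func_case
        pass

    def test_case(self):
        # run once with the new eager mode switched on ...
        with fluid.framework._test_eager_guard():
            self.func_case()
        # ... and once more in the default mode, so both paths stay covered
        self.func_case()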
@@ -22,7 +22,8 @@ import paddle
 class TestCreateParameterError(unittest.TestCase):
-    def test_errors(self):
+    def func_errors(self):
+        paddle.enable_static()
         with program_guard(Program(), Program()):
             def test_shape():
@@ -49,6 +50,11 @@ class TestCreateParameterError(unittest.TestCase):
         self.assertRaises(TypeError, test_default_initializer)
 
+    def test_errors(self):
+        with fluid.framework._test_eager_guard():
+            self.func_errors()
+        self.func_errors()
+
 if __name__ == '__main__':
     paddle.enable_static()
...
@@ -16,7 +16,7 @@ import numpy as np
 import paddle
 from paddle.incubate.nn import FusedTransformerEncoderLayer
 from paddle.nn import TransformerEncoderLayer
-from paddle.fluid.framework import default_main_program
+from paddle.fluid.framework import default_main_program, in_dygraph_mode
 import unittest
@@ -61,6 +61,8 @@ class TestFusedTransformerEncoderLayer(unittest.TestCase):
         return paddle.concat(x=[fq, fk, fv], axis=0)
 
     def test_out(self):
+        if in_dygraph_mode():
+            return
         default_main_program().random_seed = 42
         base_encoder = TransformerEncoderLayer(
             self.d_model, self.nhead, self.dim_feedforward, self.dropout_rate,
...
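Note that three different mode predicates appear in this commit, and in this branch of Paddle they are not interchangeable. A hedged summary, inferred from how the diff uses them:

from paddle.fluid import framework

# True only when the new eager dygraph mode is active; the tests above use it
# to skip code paths that have no eager equivalent yet.
print(framework.in_dygraph_mode())

# True in any imperative mode, legacy dygraph or eager.
print(framework._non_static_mode())

# True when the eager flag is set, checked without consulting the tracer;
# used by the hapi test at the end of this diff.
print(framework._in_eager_without_dygraph_check())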
@@ -33,6 +33,8 @@ class SimpleFCLayer(fluid.dygraph.Layer):
 class TestTracedLayerRecordNonPersistableInput(unittest.TestCase):
     def test_main(self):
+        if fluid.framework.in_dygraph_mode():
+            return
         traced_layer = None
         with fluid.dygraph.guard():
             feature_size = 3
...
@@ -655,7 +655,7 @@ class TestSetGlobalInitializer(unittest.TestCase):
 class TestUniformInitializerDygraph(unittest.TestCase):
-    def test_uniform_initializer(self, dtype="float32"):
+    def func_uniform_initializer(self, dtype="float32"):
         """
         In dygraph mode, we can use initializer directly to initialize a tensor.
         """
@@ -679,9 +679,14 @@ class TestUniformInitializerDygraph(unittest.TestCase):
         paddle.enable_static()
 
+    def test_uniform_initializer(self, dtype="float32"):
+        with framework._test_eager_guard():
+            self.func_uniform_initializer()
+        self.func_uniform_initializer()
+
 
 class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase):
-    def test_order(self):
+    def func_order(self):
         paddle.set_device('cpu')
         SEED = 123
         weight_attr = paddle.framework.ParamAttr(
@@ -723,6 +728,11 @@ class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase):
         self.assertTrue(np.array_equal(dynamic_res[0], static_res[0]))
         self.assertTrue(np.array_equal(dynamic_res[1], static_res[1]))
 
+    def test_order(self):
+        with framework._test_eager_guard():
+            self.func_order()
+        self.func_order()
+
 
 # 2-D Parameter with shape: [10, 15]
 class TestOrthogonalInitializer1(unittest.TestCase):
@@ -742,7 +752,7 @@ class TestOrthogonalInitializer1(unittest.TestCase):
         self.assertTrue(np.array_equal(a, b))
         self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(10)))
 
-    def test_orthogonal(self):
+    def func_orthogonal(self):
         self.config()
         paddle.set_default_dtype(self.dtype)
@@ -777,6 +787,11 @@ class TestOrthogonalInitializer1(unittest.TestCase):
         self.check_result(res_dygraph, res_static)
 
+    def test_orthogonal(self):
+        with framework._test_eager_guard():
+            self.func_orthogonal()
+        self.func_orthogonal()
+
 
 # 2-D Parameter with shape: [15, 10]
 class TestOrthogonalInitializer2(TestOrthogonalInitializer1):
@@ -841,7 +856,7 @@ class TestOrthogonalInitializer4(unittest.TestCase):
         a = a.reshape(6, -1)
         self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(6)))
 
-    def test_orthogonal(self):
+    def func_orthogonal(self):
         self.config()
         paddle.set_default_dtype(self.dtype)
@@ -869,6 +884,11 @@ class TestOrthogonalInitializer4(unittest.TestCase):
             fetch_list=[conv2d.weight])[0]
         self.check_result(res_dygraph, res_static)
 
+    def test_orthogonal(self):
+        with framework._test_eager_guard():
+            self.func_orthogonal()
+        self.func_orthogonal()
+
 
 # 4-D Parameter with shape: [50, 4, 3, 3]
 class TestOrthogonalInitializer5(TestOrthogonalInitializer4):
@@ -928,7 +948,7 @@ class TestDiracInitializer1(unittest.TestCase):
         self.assertTrue(np.array_equal(w_dygraph, w_static))
         self.assertTrue(np.array_equal(conv_out, conv_in[:, 0:2, 1:9]))
 
-    def test_dirac(self):
+    def func_dirac(self):
         self.config()
         paddle.set_default_dtype(self.dtype)
@@ -971,6 +991,11 @@ class TestDiracInitializer1(unittest.TestCase):
         self.check_result(weight_dygraph, weight_static, conv_input,
                           conv_output)
 
+    def test_dirac(self):
+        with framework._test_eager_guard():
+            self.func_dirac()
+        self.func_dirac()
+
 
 # initialize Conv2D weight
 class TestDiracInitializer2(TestDiracInitializer1):
...
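These initializer tests build the same parameter in dygraph and static mode and assert the two results agree. As a standalone illustration of the orthogonality property checked above (a sketch; `gain=3.0` matches the `9 * np.eye(10)` assertion, the remaining names are illustrative):

import numpy as np
import paddle

paddle.seed(2022)

# a [10, 15] parameter initialized orthogonally: with rows <= columns the
# rows are orthogonal and scaled by the gain, so a @ a.T == gain**2 * I
weight_attr = paddle.ParamAttr(
    initializer=paddle.nn.initializer.Orthogonal(gain=3.0))
linear = paddle.nn.Linear(10, 15, weight_attr=weight_attr)

a = linear.weight.numpy()
assert np.allclose(np.matmul(a, a.T), 9 * np.eye(10), atol=1e-5)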
@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 import unittest
-from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, _non_static_mode
+from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, _non_static_mode, in_dygraph_mode
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 import paddle.fluid.core as core
@@ -92,6 +92,8 @@ class TestVariable(unittest.TestCase):
         self.assertTrue(np.array_equal(y_grad, loss.gradient() * a))
 
     def test_traced_layer(self):
+        if in_dygraph_mode():
+            return
         with fluid.dygraph.guard():
             layer = TestTracedLayer("test_traced_layer")
             a = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
...
@@ -18,7 +18,7 @@ import unittest
 import copy
 import paddle
 from paddle.fluid.dygraph import guard
-from paddle.fluid.framework import default_main_program, Variable
+from paddle.fluid.framework import default_main_program, Variable, _test_eager_guard
 import paddle.fluid.core as core
 from paddle.fluid.executor import Executor
 import paddle.fluid.io as io
@@ -50,7 +50,7 @@ class ParameterChecks(unittest.TestCase):
         p = io.get_parameter_value_by_name('fc.w', exe, main_program)
         self.assertTrue(np.array_equal(p, np.ones(shape) * val))
 
-    def test_parambase(self):
+    def func_parambase(self):
         with guard():
             linear = paddle.nn.Linear(10, 10)
             param = linear.weight
@@ -72,7 +72,12 @@ class ParameterChecks(unittest.TestCase):
             pram_copy2 = copy.deepcopy(param, memo)
             self.assertEqual(id(param_copy), id(pram_copy2))
 
-    def test_exception(self):
+    def test_parambase(self):
+        with _test_eager_guard():
+            self.func_parambase()
+        self.func_parambase()
+
+    def func_exception(self):
         b = main_program.global_block()
         with self.assertRaises(ValueError):
             b.create_parameter(
@@ -87,7 +92,7 @@ class ParameterChecks(unittest.TestCase):
             b.create_parameter(
                 name='test', shape=[-1], dtype='float32', initializer=None)
 
-    def test_parambase_to_vector(self):
+    def func_parambase_to_vector(self):
         with guard():
             initializer = paddle.ParamAttr(
                 initializer=paddle.nn.initializer.Constant(3.))
@@ -112,6 +117,11 @@ class ParameterChecks(unittest.TestCase):
         self.assertTrue(linear2.weight.is_leaf, True)
         self.assertTrue(linear2.bias.is_leaf, True)
 
+    def test_parambase_to_vector(self):
+        with _test_eager_guard():
+            self.func_parambase_to_vector()
+        self.func_parambase_to_vector()
+
 
 if __name__ == '__main__':
     unittest.main()
@@ -134,10 +134,16 @@ class TestRetainGraph(unittest.TestCase):
         loss_g.backward()
         optim_g.minimize(loss_g)
 
-    def test_retain(self):
+    def func_retain(self):
         self.run_retain(need_retain=True)
-        self.assertRaises(RuntimeError, self.run_retain, need_retain=False)
+        if not fluid.framework.in_dygraph_mode():
+            self.assertRaises(RuntimeError, self.run_retain, need_retain=False)
+
+    def test_retain(self):
+        with fluid.framework._test_eager_guard():
+            self.func_retain()
+        self.func_retain()
 
 
 if __name__ == '__main__':
     unittest.main()
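The behavior under test: in legacy dygraph, calling `backward()` a second time through a graph that was not retained raises `RuntimeError`; the diff skips that assertion under eager mode, where the check behaves differently. A minimal sketch of the legal double-backward:

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = (x * x).sum()

# keep the intermediate graph alive so a second backward pass is allowed
y.backward(retain_graph=True)
y.backward()  # without retain_graph=True above, legacy dygraph raises here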
 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -52,6 +53,8 @@ class TestTracedLayerErrMsg(unittest.TestCase):
         self.type_str = 'class'
 
     def test_trace_err(self):
+        if fluid.framework.in_dygraph_mode():
+            return
         with fluid.dygraph.guard():
             in_x = fluid.dygraph.to_variable(
                 np.random.random((self.batch_size, self.feature_size)).astype(
@@ -80,6 +83,8 @@ class TestTracedLayerErrMsg(unittest.TestCase):
                     self.layer, [in_x])
 
     def test_set_strategy_err(self):
+        if fluid.framework.in_dygraph_mode():
+            return
         with fluid.dygraph.guard():
             in_x = fluid.dygraph.to_variable(
                 np.random.random((self.batch_size, self.feature_size)).astype(
@@ -105,6 +110,8 @@ class TestTracedLayerErrMsg(unittest.TestCase):
                 fluid.ExecutionStrategy())
 
     def test_save_inference_model_err(self):
+        if fluid.framework.in_dygraph_mode():
+            return
         with fluid.dygraph.guard():
             in_x = fluid.dygraph.to_variable(
                 np.random.random((self.batch_size, self.feature_size)).astype(
@@ -169,6 +176,8 @@ class TestTracedLayerErrMsg(unittest.TestCase):
 class TestOutVarWithNoneErrMsg(unittest.TestCase):
     def test_linear_net_with_none(self):
+        if fluid.framework.in_dygraph_mode():
+            return
         model = LinearNetWithNone(100, 16)
         in_x = paddle.to_tensor(np.random.random((4, 100)).astype('float32'))
         with self.assertRaises(TypeError):
@@ -186,6 +195,8 @@ class TestTracedLayerSaveInferenceModel(unittest.TestCase):
         shutil.rmtree(os.path.dirname(self.save_path))
 
     def test_mkdir_when_input_path_non_exist(self):
+        if fluid.framework.in_dygraph_mode():
+            return
         fc_layer = SimpleFCLayer(3, 4, 2)
         input_var = paddle.to_tensor(np.random.random([4, 3]).astype('float32'))
         with fluid.dygraph.guard():
...
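All of the tests skipped above drive `fluid.dygraph.TracedLayer`, which records a legacy ProgramDesc while the layer runs; that tracer does not exist under eager mode, hence the blanket early returns. A usage sketch of the API being guarded, assuming legacy dygraph is active (the layer is a simplified stand-in for the `SimpleFCLayer` these tests use, and the save path is illustrative):

import numpy as np
import paddle
import paddle.fluid as fluid


class SimpleFCLayer(paddle.nn.Layer):
    def __init__(self, feature_size, fc_size):
        super(SimpleFCLayer, self).__init__()
        self._linear = paddle.nn.Linear(feature_size, fc_size)

    def forward(self, x):
        return self._linear(x)


with fluid.dygraph.guard():
    layer = SimpleFCLayer(3, 2)
    in_x = paddle.to_tensor(np.random.random([4, 3]).astype('float32'))
    # trace one forward pass into a static program
    out, traced_layer = fluid.dygraph.TracedLayer.trace(layer, inputs=[in_x])
    traced_layer.save_inference_model('./traced_fc', feed=[0], fetch=[0])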
@@ -18,7 +18,8 @@ from ...fluid.core import VarDesc
 from ...fluid import framework
 from paddle import in_dynamic_mode
 from paddle.utils import unique_name
+from paddle import _C_ops
+from ... import fluid
 
 __all__ = []
@@ -123,7 +124,14 @@ class Dirac(Initializer):
                 persistable=False)
         else:
             out_var = var
+        op = None
+        if framework.in_dygraph_mode():
+            with fluid.dygraph.no_grad():
+                _C_ops.fill_constant(out_var, 'value',
+                                     float(0), 'force_cpu', False, 'dtype',
+                                     out_var.dtype, 'str_value',
+                                     str(float(0)), 'shape', out_var.shape)
+        else:
             block.append_op(
                 type='fill_constant',
                 inputs={},
@@ -158,7 +166,11 @@ class Dirac(Initializer):
             else:
                 offset += origin_shape[k] // 2 * stride
             idx_list.append(offset)
+        if framework.in_dygraph_mode():
+            with fluid.dygraph.no_grad():
+                tmp_out = _C_ops.reshape(out_var, 'shape', [-1])
+                tmp_out._share_underline_tensor_to(out_var)
+        else:
             block.append_op(
                 type="reshape",
                 inputs={"X": out_var},
@@ -171,6 +183,13 @@ class Dirac(Initializer):
             persistable=False,
             stop_gradient=True)
+        if framework.in_dygraph_mode():
+            with fluid.dygraph.no_grad():
+                tmp_tensor = _C_ops.assign_value('shape', [len(idx_list)],
+                                                 'dtype', VarDesc.VarType.INT64,
+                                                 'int64_values', idx_list)
+                tmp_tensor._share_underline_tensor_to(index_tensor)
+        else:
             block.append_op(
                 type='assign_value',
                 outputs={'Out': index_tensor},
@@ -186,6 +205,13 @@ class Dirac(Initializer):
             persistable=False,
             stop_gradient=True)
+        if framework.in_dygraph_mode():
+            with fluid.dygraph.no_grad():
+                tmp_tensor = _C_ops.assign_value('shape', [len(value_list)],
+                                                 'dtype', VarDesc.VarType.FP32,
+                                                 'fp32_values', value_list)
+                tmp_tensor._share_underline_tensor_to(value_tensor)
+        else:
             block.append_op(
                 type='assign_value',
                 outputs={'Out': value_tensor},
@@ -196,6 +222,20 @@ class Dirac(Initializer):
             },
             stop_gradient=True)
+        if framework.in_dygraph_mode():
+            with fluid.dygraph.no_grad():
+                tmp_out = _C_ops.final_state_scatter(out_var, index_tensor,
+                                                     value_tensor, True)
+                tmp_out._share_underline_tensor_to(out_var)
+                tmp_reshape_out = _C_ops.reshape(out_var, 'shape', origin_shape)
+                tmp_reshape_out._share_underline_tensor_to(out_var)
+                if var.dtype != VarDesc.VarType.FP32:
+                    tmp_cast_out = _C_ops.cast(out_var, 'in_dtype',
+                                               out_var.dtype, 'out_dtype',
+                                               var.dtype)
+                    tmp_cast_out._share_underline_tensor_to(var)
+        else:
             op = block.append_op(
                 type="scatter",
                 inputs={
@@ -206,14 +246,12 @@ class Dirac(Initializer):
                 attrs={'overwrite': True},
                 outputs={"Out": out_var},
                 stop_gradient=True)
             block.append_op(
                 type="reshape",
                 inputs={"X": out_var},
                 attrs={'shape': origin_shape},
                 outputs={"Out": out_var},
                 stop_gradient=True)
             if var.dtype != VarDesc.VarType.FP32:
                 block.append_op(
                     type="cast",
@@ -222,7 +260,6 @@ class Dirac(Initializer):
                     attrs={"in_dtype": out_var.dtype,
                            "out_dtype": var.dtype},
                     stop_gradient=True)
         if not in_dynamic_mode():
             var.op = op
         return op
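For reference, the property the initializer above guarantees: with a centered delta kernel, a Dirac-initialized convolution reproduces its leading input channels, which is also what the `conv_in[:, 0:2, 1:9]` assertion in the tests checks. A hedged usage sketch (layer shapes are illustrative):

import paddle

# Dirac sets weight[i, i, center, center] = 1 for i < min(out_channels,
# in_channels), so the conv copies input channel i to output channel i
conv = paddle.nn.Conv2D(
    in_channels=3, out_channels=2, kernel_size=3, padding=1,
    weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Dirac()),
    bias_attr=False)  # no bias, so the copy is exact

x = paddle.rand([1, 3, 8, 8])
y = conv(x)
# the two output channels equal the first two input channels
print(paddle.allclose(y, x[:, 0:2, :, :]))  # True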
@@ -15,13 +15,18 @@
 from functools import reduce
 
 import paddle
-from paddle.fluid.framework import dygraph_only, _dygraph_tracer, _varbase_creator
+from paddle.fluid.framework import dygraph_only, _dygraph_tracer, _varbase_creator, in_dygraph_mode
 from paddle import _C_ops
 
 
 #input==output, inplace strategy of reshape has no cost almostly
 def _inplace_reshape_dygraph(x, shape):
-    x_shape = _varbase_creator(dtype=x.dtype)
+    x_shape = _varbase_creator(dtype='int64')
+    if in_dygraph_mode():
+        with paddle.fluid.dygraph.no_grad():
+            tmp_out, _ = _C_ops.reshape2(x, None, 'shape', shape)
+            tmp_out._share_underline_tensor_to(x)
+    else:
         _dygraph_tracer().trace_op(
             type="reshape2",
             inputs={'X': x},
@@ -62,6 +67,10 @@ def parameters_to_vector(parameters, name=None):
         _inplace_reshape_dygraph(param, [-1])
 
     out = _varbase_creator(dtype=dtype)
+    if in_dygraph_mode():
+        with paddle.fluid.dygraph.no_grad():
+            _C_ops.concat(parameters, 'axis', 0)._share_underline_tensor_to(out)
+    else:
         _dygraph_tracer().trace_op(
             type='concat',
             inputs={'X': parameters},
@@ -109,6 +118,13 @@ def vector_to_parameters(vec, parameters, name=None):
         numel = reduce(lambda x, y: x * y, shape)
         sections.append(numel)
 
+    if in_dygraph_mode():
+        with paddle.fluid.dygraph.no_grad():
+            res = _C_ops.split(vec,
+                               len(parameters), 'axis', 0, 'sections', sections)
+            for i in range(0, len(res)):
+                res[i]._share_underline_tensor_to(parameters[i])
+    else:
        _dygraph_tracer().trace_op(
            type='split',
            inputs={'X': [vec]},
...
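The two helpers being ported here round-trip a module's parameters through a single flat vector; the reshape is done in place, which is what `_share_underline_tensor_to` preserves on the eager path. A usage sketch:

import paddle
from paddle.nn.utils import parameters_to_vector, vector_to_parameters

linear = paddle.nn.Linear(4, 3)

# flatten weight [4, 3] and bias [3] into one 15-element vector
vec = parameters_to_vector(linear.parameters())
print(vec.shape)  # [15]

# scatter the (possibly updated) vector back into the parameters, in place
vector_to_parameters(vec, linear.parameters())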
@@ -715,6 +715,8 @@ class TestModelFunction(unittest.TestCase):
             paddle.summary(nlp_net, (1, 1, 2))
 
     def test_static_flops(self):
+        if paddle.fluid.framework._in_eager_without_dygraph_check():
+            return
         paddle.disable_static()
         net = models.__dict__['mobilenet_v2'](pretrained=False)
         inputs = paddle.randn([1, 3, 224, 224])
...
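`test_static_flops` is skipped under eager mode, presumably because its static-graph conversion path relies on the legacy tracer. The public FLOPs counter itself can be called directly; a sketch:

import paddle
from paddle.vision import models

net = models.mobilenet_v2(pretrained=False)
# count FLOPs for a single [1, 3, 224, 224] input
paddle.flops(net, [1, 3, 224, 224], print_detail=False)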