未验证 提交 a9d66025 编写于 作者: J Jiabin Yang 提交者: GitHub

Fix ci problem2 (#41263)

* support test_create_paramter

* support fused_transformer_encoder_layer

* skip program_desc tracer related tests in eager mode

* fix ci tests on eager
上级 6b5cff54
......@@ -22,7 +22,8 @@ import paddle
class TestCreateParameterError(unittest.TestCase):
def test_errors(self):
def func_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
def test_shape():
......@@ -49,6 +50,11 @@ class TestCreateParameterError(unittest.TestCase):
self.assertRaises(TypeError, test_default_initializer)
def test_errors(self):
with fluid.framework._test_eager_guard():
self.func_errors()
self.func_errors()
if __name__ == '__main__':
paddle.enable_static()
......
......@@ -16,7 +16,7 @@ import numpy as np
import paddle
from paddle.incubate.nn import FusedTransformerEncoderLayer
from paddle.nn import TransformerEncoderLayer
from paddle.fluid.framework import default_main_program
from paddle.fluid.framework import default_main_program, in_dygraph_mode
import unittest
......@@ -61,6 +61,8 @@ class TestFusedTransformerEncoderLayer(unittest.TestCase):
return paddle.concat(x=[fq, fk, fv], axis=0)
def test_out(self):
if in_dygraph_mode():
return
default_main_program().random_seed = 42
base_encoder = TransformerEncoderLayer(
self.d_model, self.nhead, self.dim_feedforward, self.dropout_rate,
......
......@@ -33,6 +33,8 @@ class SimpleFCLayer(fluid.dygraph.Layer):
class TestTracedLayerRecordNonPersistableInput(unittest.TestCase):
def test_main(self):
if fluid.framework.in_dygraph_mode():
return
traced_layer = None
with fluid.dygraph.guard():
feature_size = 3
......
......@@ -655,7 +655,7 @@ class TestSetGlobalInitializer(unittest.TestCase):
class TestUniformInitializerDygraph(unittest.TestCase):
def test_uniform_initializer(self, dtype="float32"):
def func_uniform_initializer(self, dtype="float32"):
"""
In dygraph mode, we can use initializer directly to initialize a tensor.
"""
......@@ -679,9 +679,14 @@ class TestUniformInitializerDygraph(unittest.TestCase):
paddle.enable_static()
def test_uniform_initializer(self, dtype="float32"):
with framework._test_eager_guard():
self.func_uniform_initializer()
self.func_uniform_initializer()
class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase):
def test_order(self):
def func_order(self):
paddle.set_device('cpu')
SEED = 123
weight_attr = paddle.framework.ParamAttr(
......@@ -723,6 +728,11 @@ class TesetconsistencyOfDynamicAndStaticGraph(unittest.TestCase):
self.assertTrue(np.array_equal(dynamic_res[0], static_res[0]))
self.assertTrue(np.array_equal(dynamic_res[1], static_res[1]))
def test_order(self):
with framework._test_eager_guard():
self.func_order()
self.func_order()
# 2-D Parameter with shape: [10, 15]
class TestOrthogonalInitializer1(unittest.TestCase):
......@@ -742,7 +752,7 @@ class TestOrthogonalInitializer1(unittest.TestCase):
self.assertTrue(np.array_equal(a, b))
self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(10)))
def test_orthogonal(self):
def func_orthogonal(self):
self.config()
paddle.set_default_dtype(self.dtype)
......@@ -777,6 +787,11 @@ class TestOrthogonalInitializer1(unittest.TestCase):
self.check_result(res_dygraph, res_static)
def test_orthogonal(self):
with framework._test_eager_guard():
self.func_orthogonal()
self.func_orthogonal()
# 2-D Parameter with shape: [15, 10]
class TestOrthogonalInitializer2(TestOrthogonalInitializer1):
......@@ -841,7 +856,7 @@ class TestOrthogonalInitializer4(unittest.TestCase):
a = a.reshape(6, -1)
self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(6)))
def test_orthogonal(self):
def func_orthogonal(self):
self.config()
paddle.set_default_dtype(self.dtype)
......@@ -869,6 +884,11 @@ class TestOrthogonalInitializer4(unittest.TestCase):
fetch_list=[conv2d.weight])[0]
self.check_result(res_dygraph, res_static)
def test_orthogonal(self):
with framework._test_eager_guard():
self.func_orthogonal()
self.func_orthogonal()
# 4-D Parameter with shape: [50, 4, 3, 3]
class TestOrthogonalInitializer5(TestOrthogonalInitializer4):
......@@ -928,7 +948,7 @@ class TestDiracInitializer1(unittest.TestCase):
self.assertTrue(np.array_equal(w_dygraph, w_static))
self.assertTrue(np.array_equal(conv_out, conv_in[:, 0:2, 1:9]))
def test_dirac(self):
def func_dirac(self):
self.config()
paddle.set_default_dtype(self.dtype)
......@@ -971,6 +991,11 @@ class TestDiracInitializer1(unittest.TestCase):
self.check_result(weight_dygraph, weight_static, conv_input,
conv_output)
def test_dirac(self):
with framework._test_eager_guard():
self.func_dirac()
self.func_dirac()
# initialize Conv2D weight
class TestDiracInitializer2(TestDiracInitializer1):
......
......@@ -15,7 +15,7 @@
from __future__ import print_function
import unittest
from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, _non_static_mode
from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, _non_static_mode, in_dygraph_mode
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
......@@ -92,6 +92,8 @@ class TestVariable(unittest.TestCase):
self.assertTrue(np.array_equal(y_grad, loss.gradient() * a))
def test_traced_layer(self):
if in_dygraph_mode():
return
with fluid.dygraph.guard():
layer = TestTracedLayer("test_traced_layer")
a = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
......
......@@ -18,7 +18,7 @@ import unittest
import copy
import paddle
from paddle.fluid.dygraph import guard
from paddle.fluid.framework import default_main_program, Variable
from paddle.fluid.framework import default_main_program, Variable, _test_eager_guard
import paddle.fluid.core as core
from paddle.fluid.executor import Executor
import paddle.fluid.io as io
......@@ -50,7 +50,7 @@ class ParameterChecks(unittest.TestCase):
p = io.get_parameter_value_by_name('fc.w', exe, main_program)
self.assertTrue(np.array_equal(p, np.ones(shape) * val))
def test_parambase(self):
def func_parambase(self):
with guard():
linear = paddle.nn.Linear(10, 10)
param = linear.weight
......@@ -72,7 +72,12 @@ class ParameterChecks(unittest.TestCase):
pram_copy2 = copy.deepcopy(param, memo)
self.assertEqual(id(param_copy), id(pram_copy2))
def test_exception(self):
def test_parambase(self):
with _test_eager_guard():
self.func_parambase()
self.func_parambase()
def func_exception(self):
b = main_program.global_block()
with self.assertRaises(ValueError):
b.create_parameter(
......@@ -87,7 +92,7 @@ class ParameterChecks(unittest.TestCase):
b.create_parameter(
name='test', shape=[-1], dtype='float32', initializer=None)
def test_parambase_to_vector(self):
def func_parambase_to_vector(self):
with guard():
initializer = paddle.ParamAttr(
initializer=paddle.nn.initializer.Constant(3.))
......@@ -112,6 +117,11 @@ class ParameterChecks(unittest.TestCase):
self.assertTrue(linear2.weight.is_leaf, True)
self.assertTrue(linear2.bias.is_leaf, True)
def test_parambase_to_vector(self):
with _test_eager_guard():
self.func_parambase_to_vector()
self.func_parambase_to_vector()
if __name__ == '__main__':
unittest.main()
......@@ -134,9 +134,15 @@ class TestRetainGraph(unittest.TestCase):
loss_g.backward()
optim_g.minimize(loss_g)
def test_retain(self):
def func_retain(self):
self.run_retain(need_retain=True)
self.assertRaises(RuntimeError, self.run_retain, need_retain=False)
if not fluid.framework.in_dygraph_mode():
self.assertRaises(RuntimeError, self.run_retain, need_retain=False)
def test_retain(self):
with fluid.framework._test_eager_guard():
self.func_retain()
self.func_retain()
if __name__ == '__main__':
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -52,6 +53,8 @@ class TestTracedLayerErrMsg(unittest.TestCase):
self.type_str = 'class'
def test_trace_err(self):
if fluid.framework.in_dygraph_mode():
return
with fluid.dygraph.guard():
in_x = fluid.dygraph.to_variable(
np.random.random((self.batch_size, self.feature_size)).astype(
......@@ -80,6 +83,8 @@ class TestTracedLayerErrMsg(unittest.TestCase):
self.layer, [in_x])
def test_set_strategy_err(self):
if fluid.framework.in_dygraph_mode():
return
with fluid.dygraph.guard():
in_x = fluid.dygraph.to_variable(
np.random.random((self.batch_size, self.feature_size)).astype(
......@@ -105,6 +110,8 @@ class TestTracedLayerErrMsg(unittest.TestCase):
fluid.ExecutionStrategy())
def test_save_inference_model_err(self):
if fluid.framework.in_dygraph_mode():
return
with fluid.dygraph.guard():
in_x = fluid.dygraph.to_variable(
np.random.random((self.batch_size, self.feature_size)).astype(
......@@ -169,6 +176,8 @@ class TestTracedLayerErrMsg(unittest.TestCase):
class TestOutVarWithNoneErrMsg(unittest.TestCase):
def test_linear_net_with_none(self):
if fluid.framework.in_dygraph_mode():
return
model = LinearNetWithNone(100, 16)
in_x = paddle.to_tensor(np.random.random((4, 100)).astype('float32'))
with self.assertRaises(TypeError):
......@@ -186,6 +195,8 @@ class TestTracedLayerSaveInferenceModel(unittest.TestCase):
shutil.rmtree(os.path.dirname(self.save_path))
def test_mkdir_when_input_path_non_exist(self):
if fluid.framework.in_dygraph_mode():
return
fc_layer = SimpleFCLayer(3, 4, 2)
input_var = paddle.to_tensor(np.random.random([4, 3]).astype('float32'))
with fluid.dygraph.guard():
......
......@@ -18,7 +18,8 @@ from ...fluid.core import VarDesc
from ...fluid import framework
from paddle import in_dynamic_mode
from paddle.utils import unique_name
from paddle import _C_ops
from ... import fluid
__all__ = []
......@@ -123,17 +124,24 @@ class Dirac(Initializer):
persistable=False)
else:
out_var = var
block.append_op(
type='fill_constant',
inputs={},
outputs={'Out': out_var},
attrs={
'value': float(0),
'dtype': out_var.dtype,
'shape': out_var.shape,
},
stop_gradient=True)
op = None
if framework.in_dygraph_mode():
with fluid.dygraph.no_grad():
_C_ops.fill_constant(out_var, 'value',
float(0), 'force_cpu', False, 'dtype',
out_var.dtype, 'str_value',
str(float(0)), 'shape', out_var.shape)
else:
block.append_op(
type='fill_constant',
inputs={},
outputs={'Out': out_var},
attrs={
'value': float(0),
'dtype': out_var.dtype,
'shape': out_var.shape,
},
stop_gradient=True)
origin_shape = var.shape
num_per_group = origin_shape[0] // self._groups
......@@ -158,71 +166,100 @@ class Dirac(Initializer):
else:
offset += origin_shape[k] // 2 * stride
idx_list.append(offset)
block.append_op(
type="reshape",
inputs={"X": out_var},
attrs={'shape': [-1]},
outputs={"Out": out_var},
stop_gradient=True)
if framework.in_dygraph_mode():
with fluid.dygraph.no_grad():
tmp_out = _C_ops.reshape(out_var, 'shape', [-1])
tmp_out._share_underline_tensor_to(out_var)
else:
block.append_op(
type="reshape",
inputs={"X": out_var},
attrs={'shape': [-1]},
outputs={"Out": out_var},
stop_gradient=True)
index_tensor = block.create_var(
name=unique_name.generate('scatter_index'),
persistable=False,
stop_gradient=True)
block.append_op(
type='assign_value',
outputs={'Out': index_tensor},
attrs={
'dtype': VarDesc.VarType.INT64,
'shape': [len(idx_list)],
'int64_values': idx_list
},
stop_gradient=True)
if framework.in_dygraph_mode():
with fluid.dygraph.no_grad():
tmp_tensor = _C_ops.assign_value('shape', [len(idx_list)],
'dtype', VarDesc.VarType.INT64,
'int64_values', idx_list)
tmp_tensor._share_underline_tensor_to(index_tensor)
else:
block.append_op(
type='assign_value',
outputs={'Out': index_tensor},
attrs={
'dtype': VarDesc.VarType.INT64,
'shape': [len(idx_list)],
'int64_values': idx_list
},
stop_gradient=True)
value_tensor = block.create_var(
name=unique_name.generate('scatter_value'),
persistable=False,
stop_gradient=True)
block.append_op(
type='assign_value',
outputs={'Out': value_tensor},
attrs={
'dtype': VarDesc.VarType.FP32,
'shape': [len(value_list)],
'fp32_values': value_list
},
stop_gradient=True)
op = block.append_op(
type="scatter",
inputs={
"X": out_var,
"Ids": index_tensor,
"Updates": value_tensor
},
attrs={'overwrite': True},
outputs={"Out": out_var},
stop_gradient=True)
if framework.in_dygraph_mode():
with fluid.dygraph.no_grad():
tmp_tensor = _C_ops.assign_value('shape', [len(value_list)],
'dtype', VarDesc.VarType.FP32,
'fp32_values', value_list)
tmp_tensor._share_underline_tensor_to(value_tensor)
else:
block.append_op(
type='assign_value',
outputs={'Out': value_tensor},
attrs={
'dtype': VarDesc.VarType.FP32,
'shape': [len(value_list)],
'fp32_values': value_list
},
stop_gradient=True)
block.append_op(
type="reshape",
inputs={"X": out_var},
attrs={'shape': origin_shape},
outputs={"Out": out_var},
stop_gradient=True)
if framework.in_dygraph_mode():
with fluid.dygraph.no_grad():
tmp_out = _C_ops.final_state_scatter(out_var, index_tensor,
value_tensor, True)
tmp_out._share_underline_tensor_to(out_var)
tmp_reshape_out = _C_ops.reshape(out_var, 'shape', origin_shape)
tmp_reshape_out._share_underline_tensor_to(out_var)
if var.dtype != VarDesc.VarType.FP32:
tmp_cast_out = _C_ops.cast(out_var, 'in_dtype',
out_var.dtype, 'out_dtype',
var.dtype)
tmp_cast_out._share_underline_tensor_to(var)
if var.dtype != VarDesc.VarType.FP32:
else:
op = block.append_op(
type="scatter",
inputs={
"X": out_var,
"Ids": index_tensor,
"Updates": value_tensor
},
attrs={'overwrite': True},
outputs={"Out": out_var},
stop_gradient=True)
block.append_op(
type="cast",
type="reshape",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype},
attrs={'shape': origin_shape},
outputs={"Out": out_var},
stop_gradient=True)
if var.dtype != VarDesc.VarType.FP32:
block.append_op(
type="cast",
inputs={"X": out_var},
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype},
stop_gradient=True)
if not in_dynamic_mode():
var.op = op
return op
......@@ -15,20 +15,25 @@
from functools import reduce
import paddle
from paddle.fluid.framework import dygraph_only, _dygraph_tracer, _varbase_creator
from paddle.fluid.framework import dygraph_only, _dygraph_tracer, _varbase_creator, in_dygraph_mode
from paddle import _C_ops
#input==output, inplace strategy of reshape has no cost almostly
def _inplace_reshape_dygraph(x, shape):
x_shape = _varbase_creator(dtype=x.dtype)
_dygraph_tracer().trace_op(
type="reshape2",
inputs={'X': x},
outputs={'Out': x,
'XShape': x_shape},
attrs={'shape': shape},
stop_gradient=True)
x_shape = _varbase_creator(dtype='int64')
if in_dygraph_mode():
with paddle.fluid.dygraph.no_grad():
tmp_out, _ = _C_ops.reshape2(x, None, 'shape', shape)
tmp_out._share_underline_tensor_to(x)
else:
_dygraph_tracer().trace_op(
type="reshape2",
inputs={'X': x},
outputs={'Out': x,
'XShape': x_shape},
attrs={'shape': shape},
stop_gradient=True)
@dygraph_only
......@@ -62,12 +67,16 @@ def parameters_to_vector(parameters, name=None):
_inplace_reshape_dygraph(param, [-1])
out = _varbase_creator(dtype=dtype)
_dygraph_tracer().trace_op(
type='concat',
inputs={'X': parameters},
outputs={'Out': [out]},
attrs={'axis': 0},
stop_gradient=True)
if in_dygraph_mode():
with paddle.fluid.dygraph.no_grad():
_C_ops.concat(parameters, 'axis', 0)._share_underline_tensor_to(out)
else:
_dygraph_tracer().trace_op(
type='concat',
inputs={'X': parameters},
outputs={'Out': [out]},
attrs={'axis': 0},
stop_gradient=True)
for i, param in enumerate(parameters):
_inplace_reshape_dygraph(param, origin_shapes[i])
return out
......@@ -109,13 +118,20 @@ def vector_to_parameters(vec, parameters, name=None):
numel = reduce(lambda x, y: x * y, shape)
sections.append(numel)
_dygraph_tracer().trace_op(
type='split',
inputs={'X': [vec]},
outputs={'Out': parameters},
attrs={'axis': 0,
'sections': sections},
stop_gradient=True)
if in_dygraph_mode():
with paddle.fluid.dygraph.no_grad():
res = _C_ops.split(vec,
len(parameters), 'axis', 0, 'sections', sections)
for i in range(0, len(res)):
res[i]._share_underline_tensor_to(parameters[i])
else:
_dygraph_tracer().trace_op(
type='split',
inputs={'X': [vec]},
outputs={'Out': parameters},
attrs={'axis': 0,
'sections': sections},
stop_gradient=True)
for i, param in enumerate(parameters):
_inplace_reshape_dygraph(param, origin_shapes[i])
......
......@@ -715,6 +715,8 @@ class TestModelFunction(unittest.TestCase):
paddle.summary(nlp_net, (1, 1, 2))
def test_static_flops(self):
if paddle.fluid.framework._in_eager_without_dygraph_check():
return
paddle.disable_static()
net = models.__dict__['mobilenet_v2'](pretrained=False)
inputs = paddle.randn([1, 3, 224, 224])
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册