diff --git a/paddle/fluid/framework/ir/graph_test.cc b/paddle/fluid/framework/ir/graph_test.cc
index b1b8d1c586c98a327a8e5b4890ced00022155e6b..cadda49c399a6d65079cacedfea61f4fd580a69a 100644
--- a/paddle/fluid/framework/ir/graph_test.cc
+++ b/paddle/fluid/framework/ir/graph_test.cc
@@ -200,9 +200,11 @@ TEST(GraphTest, WriteAfterWrite) {
       ASSERT_TRUE(ir::IsControlDepVar(*n->inputs[1]));
       control_dep2 = n->inputs[1];
       ASSERT_EQ(n->inputs.size(), 2);
-      ASSERT_EQ(control_dep1, control_dep2);
     }
   }
+  ASSERT_NE(control_dep1, nullptr);
+  ASSERT_NE(control_dep2, nullptr);
+  ASSERT_EQ(control_dep1, control_dep2);
 }
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index 8460f93b841fe136db138e0dc7576f3aacdbeb5f..752c20fa0f244cfd40509b63bc32e377236df588 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -313,7 +313,18 @@ function run_test() {
     Running unit tests ...
     ========================================
 EOF
-        ctest --output-on-failure
+        ctest --output-on-failure -R graph_test -V
+        ctest --output-on-failure -R test_prelu_op -V
+        ctest --output-on-failure -R test_prelu_op -V
+        ctest --output-on-failure -R test_dist_transpiler -V
+        ctest --output-on-failure -R test_dist_word2vec -V
+        ctest --output-on-failure -R test_desc_clone -V
+        ctest --output-on-failure -R test_dist_mnist -V
+        ctest --output-on-failure -R test_listen_and_serv_op -V
+        ctest --output-on-failure -R test_debugger -V
+        ctest --output-on-failure -R test_dist_transformer -V
+        ctest --output-on-failure -R test_dist_se_resnext -V
+
         # make install should also be test when unittest
         make install -j `nproc`
         pip install /usr/local/opt/paddle/share/wheels/*.whl
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 972e44c9528a29417d9689dcb2408b9381346f31..511f88bd30b0fe8509006bf9c307c3810e910ebd 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -123,7 +123,7 @@ def get_numeric_gradient(place,
         y_neg = get_output()
 
         __set_elem__(tensor_to_check, i, origin)
-        gradient_flat[i] = (y_pos - y_neg) / delta / 2
+        gradient_flat[i] = (y_pos - y_neg) / delta // 2
 
     return gradient_flat.reshape(tensor_to_check.shape())
 
diff --git a/python/paddle/fluid/tests/unittests/test_desc_clone.py b/python/paddle/fluid/tests/unittests/test_desc_clone.py
index 88d44e453c7976f5e0fbda2c0871dfabd4bb30aa..aca2911482cb454f67ec31ca9867fd125ef7dcbd 100644
--- a/python/paddle/fluid/tests/unittests/test_desc_clone.py
+++ b/python/paddle/fluid/tests/unittests/test_desc_clone.py
@@ -27,6 +27,7 @@ import unittest
 from multiprocessing import Process
 import os
 import signal
+import six
 import collections
 
 SEED = 1
@@ -55,7 +56,8 @@ def cnn_model(data):
     # TODO(dzhwinter) : refine the initializer and random seed settting
     SIZE = 10
     input_shape = conv_pool_2.shape
-    param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE]
+    param_shape = [six.moves.reduce(lambda a, b: a * b, input_shape[1:], 1)
+                   ] + [SIZE]
     scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5
 
     predict = fluid.layers.fc(
diff --git a/python/paddle/fluid/tests/unittests/test_prelu_op.py b/python/paddle/fluid/tests/unittests/test_prelu_op.py
index 979be5af3bdc24b1a2fc115198eeab53469a91c0..3f006553846347f33b54d92d6b2475339aeca62a 100644
--- a/python/paddle/fluid/tests/unittests/test_prelu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prelu_op.py
@@ -39,10 +39,17 @@ class PReluTest(OpTest):
         alpha_np = np.random.rand(*x_np.shape).astype("float32")
         self.inputs = {'X': x_np, 'Alpha': alpha_np}
 
+        import sys
+        print('self.inputs', self.inputs)
+        sys.stdout.flush()
+
         out_np = np.maximum(self.inputs['X'], 0.)
         out_np = out_np + np.minimum(self.inputs['X'],
                                      0.) * self.inputs['Alpha']
         assert out_np is not self.inputs['X']
         self.outputs = {'Out': out_np}
+        import sys
+        print('self.outputs', self.outputs)
+        sys.stdout.flush()
 
     def initTestCase(self):
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index 57bc2e8a0ba173bb1273a5183340d0b618f0d73c..69c0fbbfe0fd680ea91165c541df1cd5ddeb3515 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -369,7 +369,7 @@ class DistributeTranspiler(object):
 
         # FIXME(gongwb): delete not need ops.
         # note that: some parameter is not trainable and those ops can't be deleted.
-        for varname, splited_var in self.param_var_mapping.iteritems():
+        for varname, splited_var in six.iteritems(self.param_var_mapping):
             # Get the eplist of recv vars
             eps = []
             for var in splited_var:
@@ -406,7 +406,7 @@ class DistributeTranspiler(object):
                    RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
                })
 
-        for varname, splited_var in self.param_var_mapping.iteritems():
+        for varname, splited_var in six.iteritems(self.param_var_mapping):
            #add concat ops to merge splited parameters received from parameter servers.
            if len(splited_var) <= 1:
                continue
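
For context on the op_test.py hunk: get_numeric_gradient estimates each gradient entry with a central difference, perturbing one element of the input tensor at a time and dividing the resulting output change by 2 * delta. The sketch below is illustrative only, not code from the patch; the helper name numeric_gradient and the toy objective are made up. It shows the same scheme in plain NumPy, and the comment notes the difference between true division (/) and floor division (//) in that final expression.

```python
import numpy as np


def numeric_gradient(f, x, delta=0.005):
    """Central-difference estimate of df/dx, one element at a time."""
    x = x.astype(np.float64)
    flat = x.ravel()                     # view into x, so edits are seen by f
    grad = np.zeros_like(x).ravel()
    for i in range(flat.size):
        origin = flat[i]
        flat[i] = origin + delta
        y_pos = f(x)
        flat[i] = origin - delta
        y_neg = f(x)
        flat[i] = origin                 # restore the original element
        # True division keeps the estimate a float; floor division (//)
        # would round the quotient down and distort non-integer gradients.
        grad[i] = (y_pos - y_neg) / delta / 2
    return grad.reshape(x.shape)


# Example: gradient of sum(x**2) is 2*x.
x = np.array([[0.3, -1.7], [0.5, 2.1]])
print(numeric_gradient(lambda t: np.sum(t * t), x))
```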
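The test_desc_clone.py and distribute_transpiler.py hunks replace Python-2-only constructs (the reduce builtin and dict.iteritems) with their six equivalents so the same code runs under Python 2 and 3. A small illustrative snippet follows; the dictionary contents and the shape tuple are made up for the example, while six.iteritems and six.moves.reduce are the real helpers used in the patch.

```python
from __future__ import print_function

import six
from six.moves import reduce

params = {"fc_0.w_0": [1, 2], "fc_0.b_0": [3]}

# dict.iteritems() exists only on Python 2; six.iteritems works on both,
# yielding (key, value) pairs without materializing a list.
for name, blocks in six.iteritems(params):
    print(name, blocks)

# The reduce builtin moved to functools in Python 3; six.moves.reduce
# resolves to the right implementation on either interpreter.
shape = (8, 3, 224, 224)
num_elements = reduce(lambda a, b: a * b, shape[1:], 1)
print(num_elements)  # 3 * 224 * 224 = 150528
```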
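The reference output built in test_prelu_op.py, np.maximum(x, 0.) + np.minimum(x, 0.) * alpha, is the element-wise PReLU activation: x where x is positive, alpha * x otherwise. A quick self-contained check of that identity, with shapes and seed chosen arbitrarily for illustration:

```python
import numpy as np

np.random.seed(1)
x = np.random.normal(size=(2, 3)).astype("float32")
alpha = np.random.rand(*x.shape).astype("float32")

# Reference computation as written in the unit test.
out = np.maximum(x, 0.) + np.minimum(x, 0.) * alpha

# The same activation written as an explicit piecewise select.
expected = np.where(x > 0, x, alpha * x)
assert np.allclose(out, expected)
```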