Commit beb93bb9 authored by minqiyang

Fix ut bug for graph_test

Port newly added dist_transpiler code
Port unit test for desc clone
Parent 2906d839
@@ -200,9 +200,11 @@ TEST(GraphTest, WriteAfterWrite) {
       ASSERT_TRUE(ir::IsControlDepVar(*n->inputs[1]));
       control_dep2 = n->inputs[1];
       ASSERT_EQ(n->inputs.size(), 2);
-      ASSERT_EQ(control_dep1, control_dep2);
     }
   }
+  ASSERT_NE(control_dep1, nullptr);
+  ASSERT_NE(control_dep2, nullptr);
+  ASSERT_EQ(control_dep1, control_dep2);
 }
 }  // namespace framework
 }  // namespace paddle
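The fix hoists the equality assertion out of the node loop and first verifies that both control-dependency nodes were actually found, so the comparison no longer runs on an iteration where one of them may still be unset. A minimal Python sketch of the same collect-then-assert pattern (hypothetical names, not the Paddle test itself):

    # Collect candidates during the loop, assert only once afterwards.
    control_dep1 = None
    control_dep2 = None
    for n in nodes:
        if n.name == "target_a":
            control_dep1 = n.inputs[1]
        elif n.name == "target_b":
            control_dep2 = n.inputs[1]
    assert control_dep1 is not None  # both nodes must have been visited
    assert control_dep2 is not None
    assert control_dep1 is control_dep2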
@@ -313,7 +313,18 @@ function run_test() {
     Running unit tests ...
     ========================================
 EOF
-    ctest --output-on-failure
+    ctest --output-on-failure -R graph_test -V
+    ctest --output-on-failure -R test_prelu_op -V
+    ctest --output-on-failure -R test_prelu_op -V
+    ctest --output-on-failure -R test_dist_transpiler -V
+    ctest --output-on-failure -R test_dist_word2vec -V
+    ctest --output-on-failure -R test_desc_clone -V
+    ctest --output-on-failure -R test_dist_mnist -V
+    ctest --output-on-failure -R test_listen_and_serv_op -V
+    ctest --output-on-failure -R test_debugger -V
+    ctest --output-on-failure -R test_dist_transformer -V
+    ctest --output-on-failure -R test_dist_se_resnext -V
     # make install should also be test when unittest
     make install -j `nproc`
     pip install /usr/local/opt/paddle/share/wheels/*.whl
......
@@ -123,7 +123,7 @@ def get_numeric_gradient(place,
         y_neg = get_output()
         __set_elem__(tensor_to_check, i, origin)
-        gradient_flat[i] = (y_pos - y_neg) / delta / 2
+        gradient_flat[i] = (y_pos - y_neg) / delta // 2
     return gradient_flat.reshape(tensor_to_check.shape())
......
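For context, this helper estimates gradients with the central difference (f(x + delta) - f(x - delta)) / (2 * delta). A self-contained sketch of that scheme, with assumed names and shapes rather than Paddle's actual get_numeric_gradient:

    import numpy as np

    def numeric_gradient(f, x, delta=0.005):
        """Central-difference estimate of df/dx for a scalar-valued f."""
        grad = np.zeros(x.size)
        flat = x.ravel()  # view: writes through to x
        for i in range(flat.size):
            origin = flat[i]
            flat[i] = origin + delta
            y_pos = f(x)
            flat[i] = origin - delta
            y_neg = f(x)
            flat[i] = origin  # restore the perturbed element
            grad[i] = (y_pos - y_neg) / delta / 2
        return grad.reshape(x.shape)

    # Example: f(x) = sum(x**2), so df/dx = 2x.
    x = np.array([1.0, -2.0, 3.0])
    print(numeric_gradient(lambda v: (v ** 2).sum(), x))  # ~[2., -4., 6.]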
@@ -27,6 +27,7 @@ import unittest
 from multiprocessing import Process
 import os
 import signal
+import six
 import collections

 SEED = 1
@@ -55,7 +56,8 @@ def cnn_model(data):
     # TODO(dzhwinter) : refine the initializer and random seed settting
     SIZE = 10
     input_shape = conv_pool_2.shape
-    param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE]
+    param_shape = [six.moves.reduce(lambda a, b: a * b, input_shape[1:], 1)
+                   ] + [SIZE]
     scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5
     predict = fluid.layers.fc(
......
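The port swaps the Python 2 builtin reduce for six.moves.reduce, which resolves to the builtin on Python 2 and to functools.reduce on Python 3. A small sketch with an assumed shape showing what the expression computes:

    import six

    input_shape = (64, 20, 4, 4)  # assumed (batch, channels, h, w)
    SIZE = 10
    # Product of the non-batch dimensions: 20 * 4 * 4 = 320
    flat = six.moves.reduce(lambda a, b: a * b, input_shape[1:], 1)
    param_shape = [flat] + [SIZE]  # [320, 10], the fc weight shape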
@@ -39,10 +39,17 @@ class PReluTest(OpTest):
         alpha_np = np.random.rand(*x_np.shape).astype("float32")
         self.inputs = {'X': x_np, 'Alpha': alpha_np}
+        import sys
+        print('self.inputs', self.inputs)
+        sys.stdout.flush()
         out_np = np.maximum(self.inputs['X'], 0.)
         out_np = out_np + np.minimum(self.inputs['X'],
                                      0.) * self.inputs['Alpha']
         assert out_np is not self.inputs['X']
+        import sys
+        print('self.outputs', self.outputs)
+        sys.stdout.flush()
         self.outputs = {'Out': out_np}

     def initTestCase(self):
......
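The reference output here follows the PReLU definition out = max(x, 0) + alpha * min(x, 0), applied elementwise. A tiny standalone check of that formula, using example values rather than the test's random inputs:

    import numpy as np

    x = np.array([-2.0, -0.5, 0.0, 1.5])
    alpha = np.full_like(x, 0.25)  # per-element slope for x < 0
    out = np.maximum(x, 0.) + np.minimum(x, 0.) * alpha
    print(out)  # [-0.5   -0.125  0.     1.5  ]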
@@ -369,7 +369,7 @@ class DistributeTranspiler(object):
         # FIXME(gongwb): delete not need ops.
         # note that: some parameter is not trainable and those ops can't be deleted.
-        for varname, splited_var in self.param_var_mapping.iteritems():
+        for varname, splited_var in six.iteritems(self.param_var_mapping):
             # Get the eplist of recv vars
             eps = []
             for var in splited_var:
@@ -406,7 +406,7 @@ class DistributeTranspiler(object):
                 RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
             })
-        for varname, splited_var in self.param_var_mapping.iteritems():
+        for varname, splited_var in six.iteritems(self.param_var_mapping):
             #add concat ops to merge splited parameters received from parameter servers.
             if len(splited_var) <= 1:
                 continue
......
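dict.iteritems was removed in Python 3; six.iteritems(d) calls d.iteritems() on Python 2 and d.items() on Python 3, so the loop runs under both. A minimal sketch with an assumed mapping in place of the transpiler's real param_var_mapping:

    import six

    param_var_mapping = {"fc_w": ["fc_w.block0", "fc_w.block1"],
                         "fc_b": ["fc_b.block0"]}
    for varname, splited_var in six.iteritems(param_var_mapping):
        print(varname, "->", splited_var)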