From 223c01fd9101efb14c52f98fb78cbb00e1fdf77e Mon Sep 17 00:00:00 2001 From: Zhou Wei <1183042833@qq.com> Date: Thu, 26 Aug 2021 14:05:39 +0800 Subject: [PATCH] fix iscan python bug (#35148) --- .../tests/unittests/test_gradient_clip.py | 31 +++++++------------ python/paddle/tensor/to_string.py | 10 +++--- 2 files changed, 16 insertions(+), 25 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_gradient_clip.py b/python/paddle/fluid/tests/unittests/test_gradient_clip.py index 9b6dbc00f7c..80cb25bba47 100644 --- a/python/paddle/fluid/tests/unittests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/unittests/test_gradient_clip.py @@ -56,6 +56,7 @@ class TestGradientClip(unittest.TestCase): self.BATCH_SIZE = 2 reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100) self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE) + self.clip_gradient = lambda x: None self.init() def init(self): @@ -67,9 +68,6 @@ class TestGradientClip(unittest.TestCase): places.append(fluid.CUDAPlace(0)) return places - def clip_gradient(self, params_grads): - pass - def check_clip_result(self, out, out_clip): pass @@ -132,7 +130,6 @@ class TestGradientClip(unittest.TestCase): data = next(self.train_data()) val = exe.run(prog, feed=feeder.feed(data), fetch_list=[cost])[0] self.assertEqual((1, ), val.shape) - print(val) self.assertFalse(np.isnan(val)) def backward_and_optimize(self, cost): @@ -143,11 +140,6 @@ class TestGradientClipByGlobalNorm(TestGradientClip): def init(self): self.clip_norm = 0.2 - def clip_gradient(self, params_grads): - clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm) - print(clip) - return clip(params_grads) - def check_clip_result(self, out, out_clip): global_norm = 0 for v in out: @@ -179,7 +171,6 @@ class TestGradientClipByGlobalNorm(TestGradientClip): def test_new_gradient_clip(self): def func(params_grads): clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm) - print(clip) return clip(params_grads) self.clip_gradient = func @@ -236,11 +227,6 @@ class TestGradientClipByNorm(TestGradientClip): def init(self): self.clip_norm = 0.2 - def clip_gradient(self, params_grads): - clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm) - print(clip) - return clip(params_grads) - def check_clip_result(self, out, out_clip): for u, v in zip(out, out_clip): norm = np.sqrt(np.sum(np.power(u, 2))) @@ -253,6 +239,11 @@ class TestGradientClipByNorm(TestGradientClip): # test whether the ouput is right when use grad_clip def test_gradient_clip(self): + def func(params_grads): + clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm) + return clip(params_grads) + + self.clip_gradient = func self.check_gradient_clip(fluid.CPUPlace()) # if grad is None or not need clip @@ -280,11 +271,6 @@ class TestGradientClipByValue(TestGradientClip): self.max = 0.2 self.min = 0.1 - def clip_gradient(self, params_grads): - clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min) - print(clip) - return clip(params_grads) - def check_clip_result(self, out, out_clip): for i, v in enumerate(out): out[i] = np.clip(v, self.min, self.max) @@ -297,6 +283,11 @@ class TestGradientClipByValue(TestGradientClip): # test whether the ouput is right when use grad_clip def test_gradient_clip(self): + def func(params_grads): + clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min) + return clip(params_grads) + + self.clip_gradient = func self.check_gradient_clip(fluid.CPUPlace()) # if grad is None or not need clip diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py index 9d07840be68..e42bb8f95f2 100644 --- a/python/paddle/tensor/to_string.py +++ b/python/paddle/tensor/to_string.py @@ -101,14 +101,14 @@ def _to_sumary(var): return var elif len(var.shape) == 1: if var.shape[0] > 2 * edgeitems: - return np.concatenate([var[:edgeitems], var[-edgeitems:]]) + return np.concatenate([var[:edgeitems], var[(-1 * edgeitems):]]) else: return var else: # recursively handle all dimensions if var.shape[0] > 2 * edgeitems: begin = [x for x in var[:edgeitems]] - end = [x for x in var[-edgeitems:]] + end = [x for x in var[(-1 * edgeitems):]] return np.stack([_to_sumary(x) for x in (begin + end)]) else: return np.stack([_to_sumary(x) for x in var]) @@ -162,10 +162,10 @@ def _format_tensor(var, sumary, indent=0, max_width=0, signed=False): if sumary and var.shape[0] > 2 * edgeitems: items = [ _format_item(item, max_width, signed) - for item in list(var)[:DEFAULT_PRINT_OPTIONS.edgeitems] + for item in list(var)[:edgeitems] ] + ['...'] + [ _format_item(item, max_width, signed) - for item in list(var)[-DEFAULT_PRINT_OPTIONS.edgeitems:] + for item in list(var)[(-1 * edgeitems):] ] else: items = [ @@ -181,7 +181,7 @@ def _format_tensor(var, sumary, indent=0, max_width=0, signed=False): for x in var[:edgeitems] ] + ['...'] + [ _format_tensor(x, sumary, indent + 1, max_width, signed) - for x in var[-edgeitems:] + for x in var[(-1 * edgeitems):] ] else: vars = [ -- GitLab