Commit 35e6abd7 authored by minqiyang

Change iter_parameters back and port unittest code to Python 3

Parent e8493620
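The diff below is a mechanical Python 2 → 3 port. As a quick reference, here is a minimal sketch of the idiom substitutions it applies throughout (an illustrative summary only, not part of the commit):

    # Python 2 idiom                    Python 3 replacement (as applied in this diff)
    for i in xrange(n): ...             # -> for i in range(n): ...
    d.iteritems(), d.itervalues()       # -> d.items(), d.values()
    d.has_key(k)                        # -> k in d
    print x                             # -> print(x)
    gen.next()                          # -> next(gen)
    map(f, xs)                          # -> list(map(f, xs)); map is lazy in Python 3
    import op_test                      # -> from . import op_test (explicit relative import)
    itertools.izip(a, b)                # -> zip(a, b), or six.moves.zip for 2/3 code
    0L, (1000L, 1000L)                  # -> 0, (1000, 1000); the separate long type is gone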
......@@ -963,9 +963,9 @@ class Block(object):
raise ValueError("Var {0} is not found recursively".format(name))
def all_parameters(self):
return list(self._iter_parameters())
return list(self.iter_parameters())
def _iter_parameters(self):
def iter_parameters(self):
return (item[1] for item in list(self.vars.items())
if isinstance(item[1], Parameter))
......@@ -1199,7 +1199,7 @@ class Block(object):
if not isinstance(other, Block):
raise TypeError(
"_copy_param_info_from should be invoked with Block")
for p in other._iter_parameters():
for p in other.iter_parameters():
assert isinstance(p, Parameter)
v = self.vars.get(p.name, None)
if v is None:
......
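Note on the framework.py hunks: dropping the leading underscore makes iter_parameters public again, and all_parameters simply materializes its generator. A minimal usage sketch (block here stands for any fluid Block instance; only the two method names come from the diff):

    # iter_parameters lazily yields the Parameter variables held in block.vars
    for param in block.iter_parameters():
        print(param.name)
    params = block.all_parameters()   # eager list built from the same generator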
......@@ -155,7 +155,7 @@ def train_main(use_cuda):
]
feeder = fluid.DataFeeder(feed_list, place)
for pass_id in xrange(1):
for pass_id in range(1):
for batch_id, data in enumerate(train_reader()):
outs = exe.run(main_program,
feed=feeder.feed(data),
......@@ -204,8 +204,8 @@ def decode_main(use_cuda):
]
feeder = fluid.DataFeeder(feed_list, place)
data = train_reader().next()
feed_dict = feeder.feed(map(lambda x: [x[0]], data))
data = next(train_reader())
feed_dict = feeder.feed([[x[0]] for x in data])
feed_dict['init_ids'] = init_ids
feed_dict['init_scores'] = init_scores
......@@ -214,7 +214,7 @@ def decode_main(use_cuda):
feed=feed_dict,
fetch_list=[translation_ids, translation_scores],
return_numpy=False)
print result_ids.lod()
print(result_ids.lod())
class TestBeamSearchDecoder(unittest.TestCase):
......
......@@ -301,7 +301,7 @@ class DistSeResneXt2x2:
trainer_id=trainer_id)
feed_var_list = [
var for var in trainer_prog.global_block().vars.itervalues()
var for var in trainer_prog.global_block().vars.values()
if var.is_data
]
......@@ -309,7 +309,7 @@ class DistSeResneXt2x2:
reader_generator = train_reader()
first_loss, = exe.run(fetch_list=[avg_cost.name])
print(first_loss)
for i in xrange(5):
for i in range(5):
loss, = exe.run(fetch_list=[avg_cost.name])
last_loss, = exe.run(fetch_list=[avg_cost.name])
print(last_loss)
......
......@@ -25,14 +25,16 @@ from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops
from .testsuite import create_op, set_input, append_input_output, append_loss_ops
from functools import reduce
from six.moves import zip
def randomize_probability(batch_size, class_num, dtype='float32'):
prob = np.random.uniform(
0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
prob_sum = prob.sum(axis=1)
for i in xrange(len(prob)):
for i in range(len(prob)):
prob[i] /= prob_sum[i]
return prob
......@@ -86,7 +88,7 @@ def get_numeric_gradient(place,
# we only compute gradient of one element each time.
# we use a for loop to compute the gradient of every element.
for i in xrange(tensor_size):
for i in range(tensor_size):
if in_place:
set_input(scope, op, inputs, place)
......@@ -139,7 +141,7 @@ class OpTest(unittest.TestCase):
assert isinstance(
numpy_dict,
dict), "self.inputs, self.outputs must be numpy_dict"
for var_name, var_value in numpy_dict.iteritems():
for var_name, var_value in numpy_dict.items():
if isinstance(var_value, (np.ndarray, np.generic)):
self.try_call_once(var_value.dtype)
elif isinstance(var_value, (list, tuple)):
......@@ -197,7 +199,7 @@ class OpTest(unittest.TestCase):
def _get_io_vars(self, block, numpy_inputs):
inputs = {}
for name, value in numpy_inputs.iteritems():
for name, value in numpy_inputs.items():
if isinstance(value, list):
var_list = [
block.var(sub_name) for sub_name, sub_value in value
......@@ -240,7 +242,7 @@ class OpTest(unittest.TestCase):
# if the fetch_list is customized by user, we use it directly.
# if not, fill the fetch_list by the user configured outputs in test.
if len(fetch_list) == 0:
for var_name, var in outputs.iteritems():
for var_name, var in outputs.items():
if isinstance(var, list):
for v in var:
fetch_list.append(v)
......@@ -252,7 +254,7 @@ class OpTest(unittest.TestCase):
fetch_list.append(str(out_name))
# fetch_list = map(block.var, fetch_list)
if not isinstance(fetch_list[0], fluid.framework.Variable):
fetch_list = map(block.var, fetch_list)
fetch_list = list(map(block.var, fetch_list))
outs = executor.run(program,
feed=feed_map,
fetch_list=fetch_list,
......@@ -334,7 +336,7 @@ class OpTest(unittest.TestCase):
def __assert_is_close(self, numeric_grads, analytic_grads, names,
max_relative_error, msg_prefix):
for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
for a, b, name in zip(numeric_grads, analytic_grads, names):
abs_a = np.abs(a)
abs_a[abs_a < 1e-3] = 1
......@@ -460,6 +462,6 @@ class OpTest(unittest.TestCase):
use_cuda=use_cuda, loss_name=loss.name, main_program=program)
else:
executor = Executor(place)
return map(np.array,
executor.run(prog, feed_dict, fetch_list,
return_numpy=False))
return list(
map(np.array,
executor.run(prog, feed_dict, fetch_list, return_numpy=False)))
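Note on the op_test.py hunks: in Python 3, map returns a one-shot lazy iterator rather than a list, so any result that is indexed or iterated more than once must be wrapped in list(...), as done above for fetch_list and the executor outputs. A minimal illustration:

    fetched = map(str, [1, 2, 3])          # Python 3: a lazy, one-shot iterator
    # fetched[0]                           # TypeError: 'map' object is not subscriptable
    fetched = list(map(str, [1, 2, 3]))    # materialized: indexable and reusable
    assert fetched[0] == '1'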
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestAccuracyOp(OpTest):
......@@ -26,7 +26,7 @@ class TestAccuracyOp(OpTest):
label = np.random.randint(0, 2, (n, 1))
self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
num_correct = 0
for rowid in xrange(n):
for rowid in range(n):
for ele in indices[rowid]:
if ele == label[rowid]:
num_correct += 1
......
......@@ -15,9 +15,9 @@
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from .op_test import OpTest
from scipy.special import expit
from test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs
from .test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs
class TestMKLDNNReluDim2(TestRelu):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from .op_test import OpTest
from scipy.special import expit
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestAdadeltaOp1(OpTest):
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from op_test import OpTest
from .op_test import OpTest
import math
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
from paddle.fluid import core
from paddle.fluid.op import Operator
......@@ -273,7 +273,7 @@ class TestSparseAdamOp(unittest.TestCase):
self.setup(scope, place)
op_args = dict()
for key, np_array in self.dense_inputs.iteritems():
for key, np_array in self.dense_inputs.items():
var = scope.var(key).get_tensor()
var.set(np_array, place)
op_args[key] = key
......@@ -290,7 +290,7 @@ class TestSparseAdamOp(unittest.TestCase):
adam_op = Operator("adam", **op_args)
adam_op.run(scope, place)
for key, np_array in self.outputs.iteritems():
for key, np_array in self.outputs.items():
out_var = scope.var(key).get_tensor()
actual = np.array(out_var)
actual = actual.reshape([actual.size])
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestAdamaxOp1(OpTest):
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
import sys
import math
from op_test import OpTest
from .op_test import OpTest
def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios,
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class BaseTestCase(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestArgsortOp(OpTest):
......
......@@ -80,8 +80,9 @@ class TestArrayReadWrite(unittest.TestCase):
append_backward(total_sum_scaled)
g_vars = map(default_main_program().global_block().var,
[each_x.name + "@GRAD" for each_x in x])
g_vars = list(
map(default_main_program().global_block().var,
[each_x.name + "@GRAD" for each_x in x]))
g_out = [
item.sum()
for item in exe.run(
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import op_test
from . import op_test
import numpy
import unittest
......
......@@ -14,7 +14,7 @@
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import op_test
from . import op_test
import numpy
import unittest
import paddle.fluid.framework as framework
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
from paddle.fluid import metrics
......
......@@ -17,9 +17,9 @@ import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from op_test import OpTest
from .op_test import OpTest
from paddle.fluid.framework import grad_var_name
from test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad
from .test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad
class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining):
......
......@@ -17,7 +17,7 @@ import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from op_test import OpTest
from .op_test import OpTest
from paddle.fluid.framework import grad_var_name
......@@ -415,7 +415,7 @@ class TestBatchNormOpTraining(unittest.TestCase):
self.__assert_close(scale_grad, out[6], "scale_grad")
self.__assert_close(bias_grad, out[7], "bias_grad")
print "op test forward passed: ", str(place), data_layout
print("op test forward passed: ", str(place), data_layout)
places = [core.CPUPlace()]
......
......@@ -59,8 +59,7 @@ class BeamSearchOpTester(unittest.TestCase):
np.allclose(
np.array(selected_scores),
np.array([0.5, 0.6, 0.9, 0.7])[:, np.newaxis]))
self.assertEqual(selected_ids.lod(),
[[0L, 2L, 4L], [0L, 1L, 2L, 3L, 4L]])
self.assertEqual(selected_ids.lod(), [[0, 2, 4], [0, 1, 2, 3, 4]])
def _create_pre_ids(self):
np_data = np.array([[1, 2, 3, 4]], dtype='int64')
......
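Note on the beam_search hunk: Python 3 merges int and long into a single unbounded int type, so the 0L-style literals are dropped and the expected lod is written with plain ints:

    # Python 2: type(2L) was long, distinct from int; Python 3 has one int type.
    assert isinstance(2 ** 64, int)        # unbounded, no L suffix needed or allowed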
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
import paddle.fluid.core as core
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestBilinearTensorProductOp(OpTest):
......
......@@ -13,7 +13,7 @@
#limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
def bipartite_match(distance, match_indices, match_dist):
......@@ -48,7 +48,7 @@ def bipartite_match(distance, match_indices, match_dist):
def argmax_match(distance, match_indices, match_dist, threshold):
r, c = distance.shape
for j in xrange(c):
for j in range(c):
if match_indices[j] != -1:
continue
col_dist = distance[:, j]
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
import sys
import math
from op_test import OpTest
from .op_test import OpTest
def box_coder(target_box, prior_box, prior_box_var, output_box, code_type,
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import op_test
from . import op_test
import unittest
import numpy as np
import paddle.fluid.core as core
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class Segment(object):
......@@ -63,7 +63,7 @@ class TestChunkEvalOp(OpTest):
# generate chunk beginnings
chunk_begins = sorted(
np.random.choice(
range(starts[-1]), num_chunks, replace=False))
list(range(starts[-1])), num_chunks, replace=False))
seq_chunk_begins = []
begin_idx = 0
# divide chunks into sequences
......@@ -93,7 +93,7 @@ class TestChunkEvalOp(OpTest):
self.num_infer_chunks + self.num_label_chunks
- self.num_correct_chunks)
correct_chunks = np.random.choice(
range(len(chunks)), self.num_correct_chunks, replace=False)
list(range(len(chunks))), self.num_correct_chunks, replace=False)
infer_chunks = np.random.choice(
[x for x in range(len(chunks)) if x not in correct_chunks],
self.num_infer_chunks - self.num_correct_chunks,
......@@ -138,7 +138,8 @@ class TestChunkEvalOp(OpTest):
infer.fill(self.num_chunk_types * self.num_tag_types)
label = np.copy(infer)
starts = np.random.choice(
range(1, self.batch_size), self.num_sequences - 1,
list(range(1, self.batch_size)),
self.num_sequences - 1,
replace=False).tolist()
starts.extend([0, self.batch_size])
starts = sorted(starts)
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestClipByNormOp(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestClipOp(OpTest):
......
......@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import op_test
from . import op_test
import unittest
import numpy
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestConcatOp(OpTest):
......
......@@ -39,7 +39,7 @@ class ConditionalBlockTest(unittest.TestCase):
x = numpy.random.random(size=(10, 1)).astype('float32')
outs = exe.run(feed={'X': x}, fetch_list=[out])[0]
print outs
print(outs)
loss = layers.mean(out)
append_backward(loss=loss)
outs = exe.run(
......@@ -47,7 +47,7 @@ class ConditionalBlockTest(unittest.TestCase):
fetch_list=[
default_main_program().block(0).var(data.name + "@GRAD")
])[0]
print outs
print(outs)
if __name__ == '__main__':
......
......@@ -14,7 +14,7 @@
import unittest
from test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride
from .test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride
class TestMKLDNN(TestConv2dOp):
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from .op_test import OpTest
def conv2d_forward_naive(input, filter, group, conv_param):
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from .op_test import OpTest
def conv2dtranspose_forward_naive(input_, filter_, attrs):
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from .op_test import OpTest
def conv3d_forward_naive(input, filter, group, conv_param):
......
......@@ -16,7 +16,7 @@ import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from .op_test import OpTest
def conv3dtranspose_forward_naive(input_, filter_, attrs):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
def conv_shift_forward(x, y):
......@@ -22,8 +22,8 @@ def conv_shift_forward(x, y):
M = x.shape[1]
N = y.shape[1]
y_half_width = (N - 1) / 2
for i in xrange(M):
for j in xrange(N):
for i in range(M):
for j in range(N):
out[:, i] += x[:, (i + j + M - y_half_width) % M] * y[:, j]
return out
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestCosSimOp(OpTest):
......
......@@ -18,7 +18,7 @@ import paddle.fluid.layers as layers
class TestDocString(unittest.TestCase):
def test_layer_doc_string(self):
print layers.dropout.__doc__
print(layers.dropout.__doc__)
if __name__ == '__main__':
......
......@@ -16,7 +16,7 @@ import unittest
import random
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class CRFDecoding(object):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
def crop(data, offsets, crop_shape):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest, randomize_probability
from .op_test import OpTest, randomize_probability
class TestCrossEntropyOp1(OpTest):
......
......@@ -15,8 +15,8 @@
import sys
import unittest
import numpy as np
from op_test import OpTest
from test_softmax_op import stable_softmax
from .op_test import OpTest
from .test_softmax_op import stable_softmax
def CTCAlign(input, lod, blank, merge_repeated):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestSumOp1(OpTest):
......
......@@ -21,7 +21,7 @@ import numpy as np
class TestDataBalance(unittest.TestCase):
def prepare_data(self):
def fake_data_generator():
for n in xrange(self.total_ins_num):
for n in range(self.total_ins_num):
yield np.ones((3, 4)) * n, n
# Prepare data
......@@ -41,7 +41,7 @@ class TestDataBalance(unittest.TestCase):
def prepare_lod_data(self):
def fake_data_generator():
for n in xrange(1, self.total_ins_num + 1):
for n in range(1, self.total_ins_num + 1):
d1 = (np.ones((n, 3)) * n).astype('float32')
d2 = (np.array(n).reshape((1, 1))).astype('int32')
yield d1, d2
......@@ -58,9 +58,9 @@ class TestDataBalance(unittest.TestCase):
(0, 1))
]
lod = [0]
for _ in xrange(self.batch_size):
for _ in range(self.batch_size):
try:
ins = generator.next()
ins = next(generator)
except StopIteration:
eof = True
break
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestDecayedAdagradOp1(OpTest):
......
......@@ -39,7 +39,7 @@ class TestDefaultScopeFuncs(unittest.TestCase):
self.assertTrue(i.is_int())
self.assertEqual(10, i.get_int())
for _ in xrange(10):
for _ in range(10):
scoped_function(__new_scope__)
......
......@@ -17,7 +17,7 @@ import numpy as np
import sys
import collections
import math
from op_test import OpTest
from .op_test import OpTest
class TestDetectionMAPOp(OpTest):
......@@ -176,7 +176,7 @@ class TestDetectionMAPOp(OpTest):
true_pos[label].append([score, tp])
false_pos[label].append([score, fp])
for (label, label_pos_num) in label_count.items():
for (label, label_pos_num) in list(label_count.items()):
if label_pos_num == 0 or label not in true_pos: continue
label_true_pos = true_pos[label]
label_false_pos = false_pos[label]
......
......@@ -25,6 +25,7 @@ import unittest
from multiprocessing import Process
import os
import signal
from functools import reduce
SEED = 1
DTYPE = "float32"
......@@ -172,12 +173,12 @@ class TestDistMnist(unittest.TestCase):
exe.run(fluid.default_startup_program())
feed_var_list = [
var for var in trainer_prog.global_block().vars.itervalues()
var for var in trainer_prog.global_block().vars.values()
if var.is_data
]
feeder = fluid.DataFeeder(feed_var_list, place)
for pass_id in xrange(10):
for pass_id in range(10):
for batch_id, data in enumerate(train_reader()):
exe.run(trainer_prog, feed=feeder.feed(data))
......
......@@ -151,7 +151,7 @@ class TestBasicModelWithLargeBlockSize(TranspilerTest):
["fill_constant", "fill_constant", "fill_constant"])
# the variable #fc_w will be split into two blocks
fc_w_var = startup2.global_block().var("fc_w")
self.assertEqual(fc_w_var.shape, (1000L, 1000L))
self.assertEqual(fc_w_var.shape, (1000, 1000))
# all parameters should be optimized on pserver
pserver_params = []
......@@ -184,9 +184,9 @@ class TestNoSliceVar(TranspilerTest):
_, startup = self.get_pserver(self.pserver1_ep, config)
_, startup2 = self.get_pserver(self.pserver2_ep, config)
if startup.global_block().vars.has_key("fc_w"):
if "fc_w" in startup.global_block().vars:
fc_w_var = startup.global_block().vars["fc_w"]
elif startup2.global_block().vars.has_key("fc_w"):
elif "fc_w" in startup2.global_block().vars:
fc_w_var = startup2.global_block().vars["fc_w"]
self.assertEqual(fc_w_var.shape, (1000, 1000))
......
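Note on the transpiler hunks: dict.has_key was removed in Python 3; membership is spelled with the in operator, which also works in Python 2:

    params = {"fc_w": None}
    # params.has_key("fc_w")     # Python 2 only; AttributeError on Python 3
    assert "fc_w" in params      # portable spelling used throughout this commit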
......@@ -183,12 +183,12 @@ class TestDistMnist(unittest.TestCase):
exec_strategy=exec_strategy)
feed_var_list = [
var for var in trainer_prog.global_block().vars.itervalues()
var for var in trainer_prog.global_block().vars.values()
if var.is_data
]
feeder = fluid.DataFeeder(feed_var_list, place)
for pass_id in xrange(10):
for pass_id in range(10):
for batch_id, data in enumerate(train_reader()):
avg_loss_np = train_exe.run(feed=feeder.feed(data),
fetch_list=[avg_cost.name])
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from .op_test import OpTest
class TestDropoutOp(OpTest):
......
......@@ -135,7 +135,7 @@ class TestDynRNN(unittest.TestCase):
loss_0 = exe.run(main_program,
feed=feeder.feed(data),
fetch_list=[loss])[0]
for _ in xrange(100):
for _ in range(100):
val = exe.run(main_program,
feed=feeder.feed(data),
fetch_list=[loss])[0]
......
......@@ -17,7 +17,7 @@ import random
import collections
import paddle.fluid as fluid
import unittest
from decorators import *
from .decorators import *
class Memory(object):
......@@ -30,12 +30,12 @@ class Memory(object):
assert val.dtype == self.ex.dtype
self.cur = val
def next(self):
def __next__(self):
self.ex = self.cur
self.cur = None
def __next__(self):
self.next()
next(self)
def reset(self):
self.ex = numpy.zeros(shape=self.ex.shape, dtype=self.ex.dtype)
......@@ -61,13 +61,13 @@ class BaseRNN(object):
self.num_seq = num_seq
self.inputs = collections.defaultdict(list)
for _ in xrange(num_seq):
for _ in range(num_seq):
seq_len = random.randint(1, max_seq_len - 1)
for iname in ins:
ishape = ins[iname].get('shape', None)
idtype = ins[iname].get('dtype', 'float32')
lst = []
for _ in xrange(seq_len):
for _ in range(seq_len):
lst.append(numpy.random.random(size=ishape).astype(idtype))
self.inputs[iname].append(lst)
......@@ -96,16 +96,16 @@ class BaseRNN(object):
for out in self.outputs:
retv[out] = []
for seq_id in xrange(self.num_seq):
for seq_id in range(self.num_seq):
for mname in self.mems:
self.mems[mname].reset()
for out in self.outputs:
self.outputs[out].next_sequence()
iname0 = self.inputs.keys()[0]
iname0 = list(self.inputs.keys())[0]
seq_len = len(self.inputs[iname0][seq_id])
for step_id in xrange(seq_len):
for step_id in range(seq_len):
xargs = dict()
for iname in self.inputs:
......@@ -138,7 +138,7 @@ class BaseRNN(object):
for iname in self.inputs:
lod = []
np_flatten = []
for seq_id in xrange(len(self.inputs[iname])):
for seq_id in range(len(self.inputs[iname])):
seq_len = len(self.inputs[iname][seq_id])
lod.append(seq_len)
np_flatten.extend(self.inputs[iname][seq_id])
......@@ -159,8 +159,8 @@ class BaseRNN(object):
" which is not matrix")
g = numpy.zeros(shape=p.shape, dtype=p.dtype)
for i in xrange(p.shape[0]):
for j in xrange(p.shape[1]):
for i in range(p.shape[0]):
for j in range(p.shape[1]):
o = p[i][j]
p[i][j] += delta
pos = self._exe_mean_out_()
......@@ -184,7 +184,7 @@ class BaseRNN(object):
if len(item.shape) != 1:
raise ValueError("Not support")
for i in xrange(len(item)):
for i in range(len(item)):
o = item[i]
item[i] += delta
pos = self._exe_mean_out_()
......@@ -198,14 +198,14 @@ class BaseRNN(object):
if not return_one_tensor:
return grad
for i in xrange(len(grad)):
for i in range(len(grad)):
grad[i] = numpy.concatenate(grad[i])
grad = numpy.concatenate(grad)
return grad
def _exe_mean_out_(self):
outs = self.exe()
return numpy.array([o.mean() for o in outs.itervalues()]).mean()
return numpy.array([o.mean() for o in outs.values()]).mean()
class SeedFixedTestCase(unittest.TestCase):
......@@ -274,13 +274,14 @@ class TestSimpleMul(SeedFixedTestCase):
cpu = fluid.CPUPlace()
exe = fluid.Executor(cpu)
out, w_g, i_g = map(numpy.array,
exe.run(feed=py_rnn.to_feed(cpu),
fetch_list=[
out, self.PARAM_NAME + "@GRAD",
self.DATA_NAME + "@GRAD"
],
return_numpy=False))
out, w_g, i_g = list(
map(numpy.array,
exe.run(feed=py_rnn.to_feed(cpu),
fetch_list=[
out, self.PARAM_NAME + "@GRAD", self.DATA_NAME +
"@GRAD"
],
return_numpy=False)))
out_by_python = py_rnn.exe()[self.OUT_NAME]
self.assertTrue(numpy.allclose(out, out_by_python))
w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
......@@ -351,14 +352,15 @@ class TestSimpleMulWithMemory(SeedFixedTestCase):
cpu = fluid.CPUPlace()
exe = fluid.Executor(cpu)
feed = py_rnn.to_feed(cpu)
last_np, w_g, i_g = map(numpy.array,
exe.run(feed=feed,
fetch_list=[
last, self.PARAM_NAME + "@GRAD",
self.DATA_NAME + "@GRAD"
],
return_numpy=False))
last_by_py, = py_rnn.exe().values()
last_np, w_g, i_g = list(
map(numpy.array,
exe.run(feed=feed,
fetch_list=[
last, self.PARAM_NAME + "@GRAD", self.DATA_NAME +
"@GRAD"
],
return_numpy=False)))
last_by_py, = list(py_rnn.exe().values())
w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME)
self.assertTrue(numpy.allclose(last_np, last_by_py))
......
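Note on the Memory hunks: Python 3 renamed the iterator hook from next() to __next__(), and the builtin next(obj) invokes it. (As rendered above, Memory appears to end up with two __next__ definitions after the port, the second of which calls next(self) and would recurse; this looks like a 2to3 artifact worth verifying.) A minimal sketch of the Python 3 protocol:

    class Counter(object):
        # __iter__ returns self; __next__ advances or raises StopIteration.
        def __init__(self, n):
            self.i, self.n = 0, n
        def __iter__(self):
            return self
        def __next__(self):
            if self.i >= self.n:
                raise StopIteration
            self.i += 1
            return self.i

    assert list(Counter(3)) == [1, 2, 3]   # each step calls next(c) -> c.__next__()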
......@@ -67,7 +67,7 @@ class TestDyRnnStaticInput(unittest.TestCase):
def _lodtensor_to_ndarray(self, lod_tensor):
dims = lod_tensor.shape()
ndarray = np.zeros(shape=dims).astype('float32')
for i in xrange(np.product(dims)):
for i in range(np.product(dims)):
ndarray.ravel()[i] = lod_tensor._get_float_element(i)
return ndarray, lod_tensor.recursive_sequence_lengths()
......@@ -114,7 +114,7 @@ class TestDyRnnStaticInput(unittest.TestCase):
shape=[1], dtype='int64', value=0)
step_idx.stop_gradient = True
for i in xrange(self._max_sequence_len):
for i in range(self._max_sequence_len):
step_out = fluid.layers.array_read(static_input_out_array,
step_idx)
step_out.stop_gradient = True
......@@ -140,27 +140,27 @@ class TestDyRnnStaticInput(unittest.TestCase):
static_lod = self.static_input_tensor.recursive_sequence_lengths()
static_sliced = []
cur_offset = 0
for i in xrange(len(static_lod[0])):
for i in range(len(static_lod[0])):
static_sliced.append(self.static_input_data[cur_offset:(
cur_offset + static_lod[0][i])])
cur_offset += static_lod[0][i]
static_seq_len = static_lod[0]
static_reordered = []
for i in xrange(len(x_sorted_indices)):
for i in range(len(x_sorted_indices)):
static_reordered.extend(static_sliced[x_sorted_indices[i]].tolist())
static_seq_len_reordered = [
static_seq_len[x_sorted_indices[i]]
for i in xrange(len(x_sorted_indices))
for i in range(len(x_sorted_indices))
]
static_step_outs = []
static_step_lods = []
for i in xrange(self._max_sequence_len):
for i in range(self._max_sequence_len):
end = len(x_seq_len) - bisect.bisect_left(x_seq_len_sorted, i + 1)
lod = []
total_len = 0
for i in xrange(end):
for i in range(end):
lod.append(static_seq_len_reordered[i])
total_len += lod[-1]
static_step_lods.append([lod])
......@@ -174,7 +174,7 @@ class TestDyRnnStaticInput(unittest.TestCase):
static_step_outs = self.build_graph(only_forward=True)
self.exe.run(framework.default_startup_program())
expected_outs, expected_lods = self.get_expected_static_step_outs()
for i in xrange(self._max_sequence_len):
for i in range(self._max_sequence_len):
step_out, lod = self.fetch_value(static_step_outs[i])
self.assertTrue(np.allclose(step_out, expected_outs[i]))
self.assertTrue(np.allclose(lod, expected_lods[i]))
......@@ -189,7 +189,7 @@ class TestDyRnnStaticInput(unittest.TestCase):
numeric_gradients = np.zeros(shape=static_input_shape).astype('float32')
# calculate numeric gradients
tensor_size = np.product(static_input_shape)
for i in xrange(tensor_size):
for i in range(tensor_size):
origin = self.static_input_tensor._get_float_element(i)
x_pos = origin + self._delta
self.static_input_tensor._set_float_element(i, x_pos)
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
def Levenshtein(hyp, ref):
......
......@@ -14,8 +14,8 @@
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from test_elementwise_add_op import *
from .op_test import OpTest
from .test_elementwise_add_op import *
'''
Some tests differ from the tests defined in test_elementwise_add_op.py
because MKLDNN does not support tensors of number of dimensions 3.
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from .op_test import OpTest
class TestElementwiseAddOp(OpTest):
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class ElementwiseDivOp(OpTest):
......
......@@ -26,7 +26,7 @@ class TestElementWiseAddOp(unittest.TestCase):
def test_with_place(place):
out_grad = np.random.random_sample(self.x.shape).astype(np.float32)
x_grad = out_grad
sum_axis = range(0, len(self.x.shape))
sum_axis = list(range(0, len(self.x.shape)))
del sum_axis[self.axis]
y_grad = np.sum(out_grad, axis=tuple(sum_axis))
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestElementwiseOp(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestElementwiseOp(OpTest):
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class ElementwiseMulOp(OpTest):
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestElementwisePowOp(OpTest):
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestElementwiseOp(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestExpandOpRank1(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
import math
from op_test import OpTest
from .op_test import OpTest
def quantize_max_abs(x, num_bits):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestFakeQuantizeOp(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
def fully_connected_naive(input, weights, bias_data=None):
......
......@@ -14,7 +14,7 @@
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import op_test
from . import op_test
import numpy
import unittest
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestFillConstantBatchSizeLikeWhenFirstDimIsBatchSize(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestFillConstantOp1(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
import paddle.fluid.core as core
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestFillZerosLikeOp(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestFTRLOp(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestGatherOp(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestGaussianRandomBatchSizeLike(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
from test_gaussian_random_op import TestGaussianRandomOp
from .test_gaussian_random_op import TestGaussianRandomOp
class TestMKLDNN(TestGaussianRandomOp):
......
......@@ -14,7 +14,7 @@
import paddle.fluid as fluid
from paddle.fluid.layers.device import get_places
import decorators
from . import decorators
import unittest
......
......@@ -15,8 +15,8 @@
import unittest
import numpy as np
import math
from op_test import OpTest
from test_lstm_op import identity, sigmoid, tanh, relu
from .op_test import OpTest
from .test_lstm_op import identity, sigmoid, tanh, relu
class TestGRUOp(OpTest):
......@@ -38,7 +38,7 @@ class TestGRUOp(OpTest):
for i in range(len(seq_lens)):
seq_starts.append(seq_starts[-1] + seq_lens[i])
sorted_seqs = sorted(
range(len(seq_lens)), lambda x, y: seq_lens[y] - seq_lens[x])
list(range(len(seq_lens))), lambda x, y: seq_lens[y] - seq_lens[x])
num_batch = seq_lens[sorted_seqs[0]]
for batch_idx in range(num_batch):
idx_in_seq = []
......@@ -74,15 +74,16 @@ class TestGRUOp(OpTest):
def gru(self):
input, lod = self.inputs['Input']
w = self.inputs['Weight']
b = self.inputs['Bias'] if self.inputs.has_key('Bias') else np.zeros(
b = self.inputs['Bias'] if 'Bias' in self.inputs else np.zeros(
(1, self.frame_size * 3))
batch_gate = self.outputs['BatchGate']
batch_reset_hidden_prev = self.outputs['BatchResetHiddenPrev']
batch_hidden = self.outputs['BatchHidden']
hidden = self.outputs['Hidden']
idx_in_seq_list = self.idx_in_seq_list
h_p = self.inputs['H0'][self.sorted_seqs] if self.inputs.has_key(
'H0') else np.zeros((len(idx_in_seq_list[0]), self.frame_size))
h_p = self.inputs['H0'][
self.sorted_seqs] if 'H0' in self.inputs else np.zeros(
(len(idx_in_seq_list[0]), self.frame_size))
num_batch = len(idx_in_seq_list)
end_idx = 0
for batch_idx in range(num_batch):
......
......@@ -15,7 +15,7 @@
import math
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class GRUActivationType(OpTest):
......@@ -76,7 +76,7 @@ class TestGRUUnitOp(OpTest):
x = self.inputs['Input']
h_p = self.inputs['HiddenPrev']
w = self.inputs['Weight']
b = self.inputs['Bias'] if self.inputs.has_key('Bias') else np.zeros(
b = self.inputs['Bias'] if 'Bias' in self.inputs else np.zeros(
(1, frame_size * 3))
g = x + np.tile(b, (batch_size, 1))
w_u_r = w.flatten()[:frame_size * frame_size * 2].reshape(
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestHingeLossOp(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
import math
from op_test import OpTest
from .op_test import OpTest
def find_latest_set(num):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
def huber_loss_forward(val, delta):
......
......@@ -13,7 +13,7 @@
#limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
def get_output_shape(attrs, in_shape, img_real_size):
......
......@@ -43,7 +43,7 @@ class TestLayer(unittest.TestCase):
hidden2 = fluid.layers.fc(input=hidden1, size=128, act='relu')
fluid.layers.batch_norm(input=hidden2)
print str(main_program)
print(str(main_program))
def test_dropout_layer(self):
main_program = Program()
......@@ -53,7 +53,7 @@ class TestLayer(unittest.TestCase):
name='pixel', shape=[3, 48, 48], dtype='float32')
fluid.layers.dropout(x=images, dropout_prob=0.5)
print str(main_program)
print(str(main_program))
def test_img_conv_group(self):
main_program = Program()
......@@ -65,7 +65,7 @@ class TestLayer(unittest.TestCase):
conv1 = conv_block(images, 64, 2, [0.3, 0])
conv_block(conv1, 256, 3, [0.4, 0.4, 0])
print str(main_program)
print(str(main_program))
def test_elementwise_add_with_act(self):
main_program = Program()
......
......@@ -48,7 +48,7 @@ class TestBook(unittest.TestCase):
exe.run(init_program, feed={}, fetch_list=[])
for i in xrange(100):
for i in range(100):
tensor_x = np.array(
[[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32")
tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32")
......
......@@ -17,7 +17,7 @@ import numpy as np
import numpy.random as random
import sys
import math
from op_test import OpTest
from .op_test import OpTest
class TestIOUSimilarityOp(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestEmpty(OpTest):
......
......@@ -14,7 +14,7 @@
import numpy as np
import unittest
from op_test import OpTest
from .op_test import OpTest
class TestL1NormOp(OpTest):
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestLabelSmoothOp(OpTest):
......
......@@ -17,6 +17,7 @@ import numpy as np
from operator import mul
import paddle.fluid.core as core
import paddle.fluid as fluid
from functools import reduce
np.random.random(123)
......
......@@ -20,7 +20,7 @@ from paddle.fluid.layers.device import get_places
import paddle.fluid.nets as nets
from paddle.fluid.framework import Program, program_guard, default_main_program
from paddle.fluid.param_attr import ParamAttr
import decorators
from . import decorators
class TestBook(unittest.TestCase):
......@@ -279,7 +279,7 @@ class TestBook(unittest.TestCase):
def test_nce(self):
window_size = 5
words = []
for i in xrange(window_size):
for i in range(window_size):
words.append(
layers.data(
name='word_{0}'.format(i), shape=[1], dtype='int64'))
......@@ -288,7 +288,7 @@ class TestBook(unittest.TestCase):
label_word = int(window_size / 2) + 1
embs = []
for i in xrange(window_size):
for i in range(window_size):
if i == label_word:
continue
......
......@@ -16,7 +16,7 @@ import unittest
import random
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class LinearChainCrfForward(object):
......
......@@ -20,7 +20,7 @@ import subprocess
import time
import unittest
from multiprocessing import Process
from op_test import OpTest
from .op_test import OpTest
def run_pserver(use_cuda, sync_mode, ip, port, trainers, trainer_id):
......
......@@ -36,7 +36,7 @@ class TestLoDRankTable(unittest.TestCase):
exe.run(scope=scope, feed={'x': tensor})
var = scope.find_var(rank_table.name)
table = var.get_lod_rank_table()
self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items())
self.assertEqual([(0, 5), (1, 1), (2, 1)], list(table.items()))
if __name__ == '__main__':
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from .op_test import OpTest
class TestLodResetOpByAttr(OpTest):
......
......@@ -24,7 +24,7 @@ class TestLoDTensorArray(unittest.TestCase):
tensor_array = arr.get_lod_tensor_array()
self.assertEqual(0, len(tensor_array))
cpu = core.CPUPlace()
for i in xrange(10):
for i in range(10):
t = core.LoDTensor()
t.set(numpy.array([i], dtype='float32'), cpu)
t.set_recursive_sequence_lengths([[1]])
......@@ -32,7 +32,7 @@ class TestLoDTensorArray(unittest.TestCase):
self.assertEqual(10, len(tensor_array))
for i in xrange(10):
for i in range(10):
t = tensor_array[i]
self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32'))
self.assertEqual([[1]], t.recursive_sequence_lengths())
......
......@@ -35,8 +35,10 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
tensor.set(
numpy.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_recursive_sequence_lengths([[3, 6, 1]])
expect = map(lambda x: numpy.array(x).astype('int32'),
[[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]])
expect = [
numpy.array(x).astype('int32')
for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]]
]
self.main(
tensor=tensor,
expect_array=expect,
......@@ -48,8 +50,10 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
tensor.set(
numpy.arange(10).reshape(10, 1).astype('int32'), self.place())
tensor.set_recursive_sequence_lengths([[3, 6, 0, 1]])
expect = map(lambda x: numpy.array(x).astype('int32'),
[[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]])
expect = [
numpy.array(x).astype('int32')
for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]]
]
self.main(
tensor=tensor,
expect_array=expect,
......@@ -111,8 +115,8 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
expect = [
numpy.array(
item, dtype='int32')
for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], range(
22, 39) + range(7, 21), range(39, 46)]
for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], list(
range(22, 39)) + list(range(7, 21)), list(range(39, 46))]
]
lod = [[[1, 2, 1], [1, 3, 4, 4]], [[4, 3], [1, 4, 4, 8, 4, 6, 4]],
[[2], [6, 1]]]
......