diff --git a/paddle/fluid/operators/fake_quantize_op.cu b/paddle/fluid/operators/fake_quantize_op.cu
index 7c65d6dba7d67b5d31720bae1f4877dd22210138..a0ff6396210c2b3a7f8bd6b9f274b875d7fd4933 100644
--- a/paddle/fluid/operators/fake_quantize_op.cu
+++ b/paddle/fluid/operators/fake_quantize_op.cu
@@ -119,7 +119,8 @@ struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, T> {
                   const framework::Tensor& last_scale,
                   const framework::Tensor& iter, const int window_size,
                   framework::Tensor* scales_arr, framework::Tensor* out_scale) {
-    auto& gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
+    const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
+
     T* scale_arr = scales_arr->mutable_data<T>(gpu_place);
     T* out_scale_data = out_scale->mutable_data<T>(gpu_place);
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index 7199424b4709fbe9fc962cf98aea6223b9f3e51d..84f9d6671a80889c4ff92832f546d51ef4352007 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -115,6 +115,7 @@ function cmake_gen() {
         -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF}
         -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
         -DWITH_CONTRIB=${WITH_CONTRIB:-ON}
+        -DWITH_INFERENCE=${WITH_INFERENCE:-ON}
         -DWITH_ANAKIN=${WITH_ANAKIN:-OFF}
         -DPY_VERSION=${PY_VERSION:-2.7}
     ========================================
@@ -144,6 +145,7 @@ EOF
         -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} \
         -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
         -DWITH_CONTRIB=${WITH_CONTRIB:-ON} \
+        -DWITH_INFERENCE=${WITH_INFERENCE:-ON} \
         -DWITH_ANAKIN=${WITH_ANAKIN:-OFF} \
         -DPY_VERSION=${PY_VERSION:-2.7}
 }
diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py
index 7abfa0a4be0dec9fe251704e22dfef1f932e7c5b..e3db316698398ff693157d583ad1410d10dcf81d 100644
--- a/python/paddle/fluid/tests/unittests/dist_transformer.py
+++ b/python/paddle/fluid/tests/unittests/dist_transformer.py
@@ -36,6 +36,7 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid import core
 from test_dist_base import TestDistRunnerBase, runtime_main
+import paddle.compat as cpt
 from paddle.compat import long_type
 
 import hashlib
@@ -315,8 +316,9 @@ def pad_batch_data(insts,
     """
     return_list = []
     max_len = max(len(inst) for inst in insts)
-    num_token = reduce(lambda x, y: x + y,
-                       [len(inst) for inst in insts]) if return_num_token else 0
+    num_token = six.moves.reduce(
+        lambda x, y: x + y,
+        [len(inst) for inst in insts]) if return_num_token else 0
     # Any token included in dict can be used to pad, since the paddings' loss
     # will be masked out by weights and make no effect on parameter gradients.
     inst_data = np.array(
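(Editorial note on the `reduce` hunk above.) Python 3 removed the `reduce` builtin, so the old spelling raises `NameError` there; `six.moves.reduce` resolves to the builtin on Python 2 and to `functools.reduce` on Python 3. A minimal sketch of the equivalence, using a hypothetical `insts` list of token-id sequences:

```python
from six.moves import reduce  # builtin on Py2, functools.reduce on Py3

insts = [[2, 7, 1], [4, 5]]  # hypothetical token-id sequences

# The pattern used in pad_batch_data: total token count across instances.
num_token = reduce(lambda x, y: x + y, [len(inst) for inst in insts])
assert num_token == 5

# sum() over a generator is an equivalent, version-agnostic spelling.
assert sum(len(inst) for inst in insts) == num_token
```

`sum()` would be the shorter spelling, but the patch keeps the original `reduce` structure, presumably to stay a minimal compatibility fix.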
@@ -328,7 +330,7 @@ def pad_batch_data(insts,
         return_list += [inst_weight.astype("float32").reshape([-1, 1])]
     else:  # position data
         inst_pos = np.array([
-            range(1, len(inst) + 1) + [0] * (max_len - len(inst))
+            list(range(1, len(inst) + 1)) + [0] * (max_len - len(inst))
             for inst in insts
         ])
         return_list += [inst_pos.astype("int64").reshape([-1, 1])]
@@ -385,10 +387,11 @@ def prepare_batch_input(insts, data_input_names, src_pad_idx, trg_pad_idx,
         return_num_token=True)
 
     data_input_dict = dict(
-        zip(data_input_names, [
-            src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos,
-            trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight
-        ]))
+        list(
+            zip(data_input_names, [
+                src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos,
+                trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight
+            ])))
 
     return data_input_dict, np.asarray([num_token], dtype="float32")
@@ -561,7 +564,7 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
         np.log(TrainTaskConfig.label_smooth_eps / (
             ModelHyperParams.trg_vocab_size - 1) + 1e-20))
     init = False
-    for pass_id in xrange(TrainTaskConfig.pass_num):
+    for pass_id in six.moves.xrange(TrainTaskConfig.pass_num):
         pass_start_time = time.time()
         for batch_id, data in enumerate(train_data()):
             if batch_id >= 5:
@@ -587,11 +590,11 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
                     ModelHyperParams.eos_idx, ModelHyperParams.n_head,
                     ModelHyperParams.d_model)
                 total_num_token += num_token
-                feed_kv_pairs = data_input_dict.items()
+                feed_kv_pairs = list(data_input_dict.items())
                 if TrainTaskConfig.local:
-                    feed_kv_pairs += {
+                    feed_kv_pairs += list({
                         lr_scheduler.learning_rate.name: lr_rate
-                    }.items()
+                    }.items())
                 feed_list.append(dict(feed_kv_pairs))
 
                 if not init:
@@ -873,6 +876,7 @@ class DataReader(object):
                 f = tarfile.open(fpaths[0], "r")
 
                 for line in f.extractfile(tar_fname):
+                    line = cpt.to_text(line)
                     fields = line.strip("\n").split(self._field_delimiter)
                     if (not self._only_src and len(fields) == 2) or (
                             self._only_src and len(fields) == 1):
@@ -882,8 +886,9 @@
                 if not os.path.isfile(fpath):
                     raise IOError("Invalid file: %s" % fpath)
 
-                with open(fpath, "r") as f:
+                with open(fpath, "rb") as f:
                     for line in f:
+                        line = cpt.to_text(line)
                         fields = line.strip("\n").split(self._field_delimiter)
                         if (not self._only_src and len(fields) == 2) or (
                                 self._only_src and len(fields) == 1):
@@ -892,8 +897,9 @@
     @staticmethod
     def load_dict(dict_path, reverse=False):
         word_dict = {}
-        with open(dict_path, "r") as fdict:
+        with open(dict_path, "rb") as fdict:
             for idx, line in enumerate(fdict):
+                line = cpt.to_text(line)
                 if reverse:
                     word_dict[idx] = line.strip("\n")
                 else:
@@ -1034,7 +1040,7 @@ def multi_head_attention(queries,
         # size of the input as the output dimension size.
         return layers.reshape(
             x=trans_x,
-            shape=map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]]))
+            shape=list(map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]])))
 
     def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate):
         """
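(Editorial note on the `DataReader` hunks above.) They switch to binary mode plus `cpt.to_text` because Python 3 keeps `bytes` and `str` separate: files opened with `"rb"` and members read via `tarfile.extractfile` yield `bytes`, and `bytes.strip("\n")` raises `TypeError` on Python 3 because the argument is text. A rough sketch of what a `to_text`-style helper does (an illustration of the idea, not necessarily Paddle's exact implementation):

```python
def to_text(obj, encoding="utf-8"):
    # Decode bytes into native text; leave text untouched.
    if isinstance(obj, bytes):
        return obj.decode(encoding)
    return obj

line = b"source sentence\ttarget sentence\n"  # what "rb" mode yields
fields = to_text(line).strip("\n").split("\t")
assert fields == ["source sentence", "target sentence"]
```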
diff --git a/python/paddle/fluid/tests/unittests/test_prelu_op.py b/python/paddle/fluid/tests/unittests/test_prelu_op.py
index 1e3e40d54a78045c8d8fdd9a3a3715107d1e7a80..48a6b0577b6787d2e1231fdcbe6d2c1bb46414ed 100644
--- a/python/paddle/fluid/tests/unittests/test_prelu_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prelu_op.py
@@ -16,6 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
+import six
 from op_test import OpTest
 
@@ -62,17 +63,20 @@ class PReluTest(OpTest):
 
 # TODO(minqiyang): Resume these test cases after fixing Python3 CI job issues
-# class TestCase1(PReluTest):
-#     def initTestCase(self):
-#         self.attrs = {'mode': "all"}
+if six.PY2:
 
-# class TestCase2(PReluTest):
-#     def initTestCase(self):
-#         self.attrs = {'mode': "channel"}
+    class TestCase1(PReluTest):
+        def initTestCase(self):
+            self.attrs = {'mode': "all"}
+
+    class TestCase2(PReluTest):
+        def initTestCase(self):
+            self.attrs = {'mode': "channel"}
+
+    class TestCase3(PReluTest):
+        def initTestCase(self):
+            self.attrs = {'mode': "element"}
 
-# class TestCase3(PReluTest):
-#     def initTestCase(self):
-#         self.attrs = {'mode': "element"}
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/transpiler/details/program_utils.py b/python/paddle/fluid/transpiler/details/program_utils.py
index f0fafaa84a73d641ff6ceb74def6addaea759516..a83aa0f11eed9bfc1674d8d75dcfacc297f056b0 100644
--- a/python/paddle/fluid/transpiler/details/program_utils.py
+++ b/python/paddle/fluid/transpiler/details/program_utils.py
@@ -153,7 +153,7 @@ def block_to_code(block, block_idx):
     indent += 1
 
     # sort all vars
-    all_vars = sorted(block.vars.iteritems(), key=lambda x: x[0])
+    all_vars = sorted(six.iteritems(block.vars), key=lambda x: x[0])
 
     for var in all_vars:
         print("{}{}".format(get_indent_space(indent), variable_to_code(var[1])))
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index 8a330e0dee7eda02d0858446778363f2235a3d73..d4d218d547a394a56c040ade2a9ba703b691b86b 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -300,7 +300,7 @@ class DistributeTranspiler(object):
             input_deps = grad_name_to_send_dummy_out.values()
             program.global_block().append_op(
                 type="send_barrier",
-                inputs={"X": input_deps},
+                inputs={"X": list(input_deps)},
                 outputs={"Out": send_barrier_out},
                 attrs={
                     "endpoints": pserver_endpoints,
@@ -401,7 +401,7 @@
 
         Args:
            recv_vars (list): Variable list to recv for current trainer_id
-            eplist (list): A list of strings indicating 
+            eplist (list): A list of strings indicating
 
         Returns:
             Program: trainer side startup program.
@@ -455,7 +455,7 @@
             if len(splited_var) <= 1:
                 continue
             # NOTE: if enable memory optimization, origin vars maybe removed.
-            if startup_program.global_block().vars.has_key(varname):
+            if varname in startup_program.global_block().vars:
                 orig_param = startup_program.global_block().vars[varname]
             else:
                 origin_param_var = self.origin_program.global_block().vars[
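(Editorial note on the `iteritems`, `.values()`, and `has_key` hunks above.) All three track the same Python 3 change to the dict API: `values()`/`items()` now return lazy views rather than lists, the `iter*` variants are gone, and membership is tested with `in`. A small sketch of the behaviors the patch accounts for, with a hypothetical gradient-to-dummy-output map:

```python
import six

d = {"w@GRAD": "send_dummy_0"}  # hypothetical map, stands in for grad_name_to_send_dummy_out

deps = d.values()                      # a lazy view on Python 3, not a list
assert list(deps) == ["send_dummy_0"]  # materialize before handing to append_op

# dict.has_key() was removed in Python 3; the `in` operator works on both.
assert "w@GRAD" in d

# six.iteritems(d) picks d.iteritems() on Py2 and iter(d.items()) on Py3.
assert sorted(six.iteritems(d)) == [("w@GRAD", "send_dummy_0")]
```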
@@ -690,7 +690,7 @@
 
         Args:
             endpoint (str): current pserver endpoint.
-        
+
         Returns:
             tuple: (main_program, startup_program), of type "Program"
         """
@@ -713,7 +713,7 @@
             endpoint (str): current pserver endpoint.
             pserver_program (Program): deprecated, call get_pserver_program first.
             startup_program (Program): deprecated, should pass startup_program
-                when initalizing 
+                when initializing
 
         Returns:
             Program: parameter server side startup program.
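(Editorial note on the `test_prelu_op.py` hunk earlier in the patch.) It replaces commented-out tests with an `if six.PY2:` guard, so the extra PRelu modes keep running under Python 2 while the Python 3 CI issue from the TODO is investigated. The same gating pattern in isolation (a sketch with a hypothetical base test, not the real `PReluTest`):

```python
import unittest

import six


class HypotheticalBase(unittest.TestCase):
    mode = "all"

    def test_mode_is_known(self):
        self.assertIn(self.mode, {"all", "channel", "element"})


if six.PY2:
    # Only defined under Python 2, so Python 3's unittest discovery
    # never sees (or runs) this case.
    class ChannelCase(HypotheticalBase):
        mode = "channel"


if __name__ == "__main__":
    unittest.main()
```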