Unverified · Commit b8057515 · authored by Qiyang Min · committed by GitHub

Merge pull request #13223 from velconia/open_python35_CI

Open python35 ci
@@ -119,7 +119,8 @@ struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, T> {
                   const framework::Tensor& last_scale,
                   const framework::Tensor& iter, const int window_size,
                   framework::Tensor* scales_arr, framework::Tensor* out_scale) {
-    auto& gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
+    const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
     T* scale_arr = scales_arr->mutable_data<T>(gpu_place);
     T* out_scale_data = out_scale->mutable_data<T>(gpu_place);
...
@@ -115,6 +115,7 @@ function cmake_gen() {
         -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF}
         -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
         -DWITH_CONTRIB=${WITH_CONTRIB:-ON}
+        -DWITH_INFERENCE=${WITH_INFERENCE:-ON}
         -DWITH_ANAKIN=${WITH_ANAKIN:-OFF}
         -DPY_VERSION=${PY_VERSION:-2.7}
     ========================================
@@ -144,6 +145,7 @@ EOF
         -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} \
         -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
         -DWITH_CONTRIB=${WITH_CONTRIB:-ON} \
+        -DWITH_INFERENCE=${WITH_INFERENCE:-ON} \
         -DWITH_ANAKIN=${WITH_ANAKIN:-OFF} \
         -DPY_VERSION=${PY_VERSION:-2.7}
 }
...
@@ -36,6 +36,7 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 from paddle.fluid import core
 from test_dist_base import TestDistRunnerBase, runtime_main
+import paddle.compat as cpt
 from paddle.compat import long_type
 import hashlib
@@ -315,7 +316,8 @@ def pad_batch_data(insts,
     """
     return_list = []
     max_len = max(len(inst) for inst in insts)
-    num_token = reduce(lambda x, y: x + y,
-                       [len(inst) for inst in insts]) if return_num_token else 0
+    num_token = six.moves.reduce(
+        lambda x, y: x + y,
+        [len(inst) for inst in insts]) if return_num_token else 0
     # Any token included in dict can be used to pad, since the paddings' loss
     # will be masked out by weights and make no effect on parameter gradients.
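As context for the change above: `reduce` is a builtin only on Python 2 (it moved to `functools` in Python 3), and `six.moves` resolves to the right one on either interpreter. A minimal standalone sketch with toy values:

```python
from six.moves import reduce, xrange  # functools.reduce / range on Py3

insts = [[1, 2, 3], [4, 5]]  # toy batch: two instances of different lengths

# Works unchanged on Python 2 and 3; a bare `reduce(...)` is a NameError on Py3.
num_token = reduce(lambda x, y: x + y, [len(inst) for inst in insts])
assert num_token == 5

# Same idea for xrange (used further down): six.moves.xrange is the lazy
# range on both versions.
for pass_id in xrange(2):
    print("pass", pass_id, "num_token", num_token)
```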
@@ -328,7 +330,7 @@ def pad_batch_data(insts,
         return_list += [inst_weight.astype("float32").reshape([-1, 1])]
     else:  # position data
         inst_pos = np.array([
-            range(1, len(inst) + 1) + [0] * (max_len - len(inst))
+            list(range(1, len(inst) + 1)) + [0] * (max_len - len(inst))
             for inst in insts
         ])
         return_list += [inst_pos.astype("int64").reshape([-1, 1])]
@@ -385,10 +387,11 @@ def prepare_batch_input(insts, data_input_names, src_pad_idx, trg_pad_idx,
         return_num_token=True)
     data_input_dict = dict(
+        list(
             zip(data_input_names, [
                 src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos,
                 trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight
-            ]))
+            ])))
     return data_input_dict, np.asarray([num_token], dtype="float32")
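Similarly, `zip()` returns a one-shot iterator on Python 3; `dict()` can still consume it, but materializing with `list()` keeps the pairs reusable and matches Python 2's eager behavior. An illustrative sketch (field names are placeholders):

```python
names = ["src_word", "src_pos"]  # illustrative field names
values = [[1, 2], [3, 4]]        # illustrative tensors

# list(zip(...)) is an eager list of pairs on both Py2 and Py3.
data_input_dict = dict(list(zip(names, values)))
assert data_input_dict["src_pos"] == [3, 4]

# On Py3 a bare zip() is exhausted after one pass; list() avoids surprises
# if the pairs are ever iterated twice.
```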
@@ -561,7 +564,7 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
         np.log(TrainTaskConfig.label_smooth_eps / (
             ModelHyperParams.trg_vocab_size - 1) + 1e-20))
     init = False
-    for pass_id in xrange(TrainTaskConfig.pass_num):
+    for pass_id in six.moves.xrange(TrainTaskConfig.pass_num):
         pass_start_time = time.time()
         for batch_id, data in enumerate(train_data()):
             if batch_id >= 5:
@@ -587,11 +590,11 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
                 ModelHyperParams.eos_idx, ModelHyperParams.n_head,
                 ModelHyperParams.d_model)
             total_num_token += num_token
-            feed_kv_pairs = data_input_dict.items()
+            feed_kv_pairs = list(data_input_dict.items())
             if TrainTaskConfig.local:
-                feed_kv_pairs += {
+                feed_kv_pairs += list({
                     lr_scheduler.learning_rate.name: lr_rate
-                }.items()
+                }.items())
             feed_list.append(dict(feed_kv_pairs))

             if not init:
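Here `dict.items()` is the culprit: on Python 3 it returns a view, and `+=` between a view and another view or list raises `TypeError`, hence the `list(...)` on both sides. A sketch with placeholder keys:

```python
data_input_dict = {"src_word": 1, "src_pos": 2}  # placeholder feed dict
extra = {"learning_rate": 0.001}                 # placeholder LR entry

# On Py3, dict.items() is a view with no __iadd__, so both sides are
# materialized with list() before concatenating.
feed_kv_pairs = list(data_input_dict.items())
feed_kv_pairs += list(extra.items())
assert dict(feed_kv_pairs)["learning_rate"] == 0.001
```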
@@ -873,6 +876,7 @@ class DataReader(object):
             f = tarfile.open(fpaths[0], "r")
             for line in f.extractfile(tar_fname):
+                line = cpt.to_text(line)
                 fields = line.strip("\n").split(self._field_delimiter)
                 if (not self._only_src and len(fields) == 2) or (
                         self._only_src and len(fields) == 1):
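Binary-mode file handles, and members extracted from a tarfile, yield `bytes` on Python 3, so each line is decoded before any string operations. A rough standalone stand-in for what `cpt.to_text` (i.e. `paddle.compat.to_text`) does here, simplified from the real helper:

```python
import six

def to_text(obj, encoding="utf-8"):
    # Simplified stand-in for paddle.compat.to_text: decode bytes to
    # text, pass text through unchanged.
    if isinstance(obj, six.binary_type):
        return obj.decode(encoding)
    return obj

line = b"hello\tworld\n"  # what binary-mode iteration yields on Py3
fields = to_text(line).strip("\n").split("\t")
assert fields == ["hello", "world"]
```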
@@ -882,8 +886,9 @@ class DataReader(object):
                 if not os.path.isfile(fpath):
                     raise IOError("Invalid file: %s" % fpath)
-                with open(fpath, "r") as f:
+                with open(fpath, "rb") as f:
                     for line in f:
+                        line = cpt.to_text(line)
                         fields = line.strip("\n").split(self._field_delimiter)
                         if (not self._only_src and len(fields) == 2) or (
                                 self._only_src and len(fields) == 1):
@@ -892,8 +897,9 @@ class DataReader(object):
     @staticmethod
     def load_dict(dict_path, reverse=False):
         word_dict = {}
-        with open(dict_path, "r") as fdict:
+        with open(dict_path, "rb") as fdict:
             for idx, line in enumerate(fdict):
+                line = cpt.to_text(line)
                 if reverse:
                     word_dict[idx] = line.strip("\n")
                 else:
@@ -1034,7 +1040,7 @@ def multi_head_attention(queries,
         # size of the input as the output dimension size.
         return layers.reshape(
             x=trans_x,
-            shape=map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]]))
+            shape=list(map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]])))

 def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate):
     """
...
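Same lazy-iterator pattern once more: Python 3's `map()` no longer returns a list, so a consumer expecting a concrete list of ints needs `list(map(...))`. Sketch with a made-up shape:

```python
trans_x_shape = (32, 8, 10, 64)  # illustrative (batch, n_head, seq_len, d_key)

# Py2: map() returns a list. Py3: map() is lazy, so callers that need a
# concrete list of ints must wrap it in list().
shape = list(map(int, [0, 0, trans_x_shape[2] * trans_x_shape[3]]))
assert shape == [0, 0, 640]
```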
@@ -16,6 +16,7 @@ from __future__ import print_function

 import unittest
 import numpy as np
+import six
 from op_test import OpTest
@@ -62,17 +63,20 @@ class PReluTest(OpTest):

 # TODO(minqiyang): Resume these test cases after fixing Python3 CI job issues
-# class TestCase1(PReluTest):
-#     def initTestCase(self):
-#         self.attrs = {'mode': "all"}
+if six.PY2:

-# class TestCase2(PReluTest):
-#     def initTestCase(self):
-#         self.attrs = {'mode': "channel"}
+    class TestCase1(PReluTest):
+        def initTestCase(self):
+            self.attrs = {'mode': "all"}

+    class TestCase2(PReluTest):
+        def initTestCase(self):
+            self.attrs = {'mode': "channel"}

+    class TestCase3(PReluTest):
+        def initTestCase(self):
+            self.attrs = {'mode': "element"}

-# class TestCase3(PReluTest):
-#     def initTestCase(self):
-#         self.attrs = {'mode': "element"}

 if __name__ == "__main__":
     unittest.main()
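The previously commented-out cases are restored here, but registered only under Python 2 via `six.PY2`, since unittest collects any test class defined at module scope. A minimal sketch of that gating pattern (class names are illustrative):

```python
import unittest

import six

class BaseCase(unittest.TestCase):  # stand-in for PReluTest
    def test_runs(self):
        self.assertTrue(True)

if six.PY2:
    # Defined, and therefore collected by unittest, only on Python 2
    # interpreters; Python 3 runs skip it entirely.
    class Py2OnlyCase(BaseCase):
        pass

if __name__ == "__main__":
    unittest.main()
```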
@@ -153,7 +153,7 @@ def block_to_code(block, block_idx):
     indent += 1

     # sort all vars
-    all_vars = sorted(block.vars.iteritems(), key=lambda x: x[0])
+    all_vars = sorted(six.iteritems(block.vars), key=lambda x: x[0])

     for var in all_vars:
         print("{}{}".format(get_indent_space(indent), variable_to_code(var[1])))
...
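`dict.iteritems()` no longer exists on Python 3; `six.iteritems(d)` dispatches to `iteritems()` on Python 2 and `items()` on Python 3. Sketch with a placeholder variable map:

```python
import six

block_vars = {"fc_0.w": "w-var", "fc_0.b": "b-var"}  # placeholder name->var map

# Py2: d.iteritems(); Py3: d.items() -- six.iteritems picks the right one.
all_vars = sorted(six.iteritems(block_vars), key=lambda x: x[0])
assert [name for name, _ in all_vars] == ["fc_0.b", "fc_0.w"]
```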
@@ -300,7 +300,7 @@ class DistributeTranspiler(object):
             input_deps = grad_name_to_send_dummy_out.values()
             program.global_block().append_op(
                 type="send_barrier",
-                inputs={"X": input_deps},
+                inputs={"X": list(input_deps)},
                 outputs={"Out": send_barrier_out},
                 attrs={
                     "endpoints": pserver_endpoints,
if len(splited_var) <= 1: if len(splited_var) <= 1:
continue continue
# NOTE: if enable memory optimization, origin vars maybe removed. # NOTE: if enable memory optimization, origin vars maybe removed.
if startup_program.global_block().vars.has_key(varname): if varname in startup_program.global_block().vars:
orig_param = startup_program.global_block().vars[varname] orig_param = startup_program.global_block().vars[varname]
else: else:
origin_param_var = self.origin_program.global_block().vars[ origin_param_var = self.origin_program.global_block().vars[
......
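Two more Python 3 removals handled in this file: `dict.has_key()` is gone (the `in` operator works on both versions), and `dict.values()` returns a view that some consumers reject, hence the `list(...)` in the send_barrier hunk above. A sketch with placeholder names:

```python
block_vars = {"w0": "param-var"}           # placeholder var table
grad_to_dummy = {"w0@GRAD": "dummy-out"}   # placeholder grad -> dummy map

# dict.has_key() was removed in Py3; the `in` operator works on both.
varname = "w0"
assert varname in block_vars

# dict.values() is a view on Py3; list() yields a concrete, indexable list.
input_deps = list(grad_to_dummy.values())
assert input_deps == ["dummy-out"]
```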