From 312f3b86546ddbf7fce2a3bd0fb991c9cca2962a Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Mon, 27 Aug 2018 16:58:50 +0800
Subject: [PATCH] Fix random diff between python2 and python3

---
 python/paddle/dataset/movielens.py                  |  5 ++-
 python/paddle/fluid/layers/nn.py                    | 42 +++++++++----------
 .../fluid/transpiler/distribute_transpiler.py       |  5 +--
 3 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/python/paddle/dataset/movielens.py b/python/paddle/dataset/movielens.py
index c98e0019f7..64bf741481 100644
--- a/python/paddle/dataset/movielens.py
+++ b/python/paddle/dataset/movielens.py
@@ -24,6 +24,7 @@ set and test set into paddle reader creators.
 
 from __future__ import print_function
 
+import numpy as np
 import zipfile
 import paddle.dataset.common
 import re
@@ -150,12 +151,12 @@ def __initialize_meta_info__():
 
 def __reader__(rand_seed=0, test_ratio=0.1, is_test=False):
     fn = __initialize_meta_info__()
-    rand = random.Random(x=rand_seed)
+    np.random.seed(rand_seed)
     with zipfile.ZipFile(file=fn) as package:
         with package.open('ml-1m/ratings.dat') as rating:
             for line in rating:
                 line = cpt.to_text(line, encoding='latin')
-                if (rand.random() < test_ratio) == is_test:
+                if (np.random.random() < test_ratio) == is_test:
                     uid, mov_id, rating, _ = line.strip().split("::")
                     uid = int(uid)
                     mov_id = int(mov_id)
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 66b776c08e..ca10d73b08 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -17,6 +17,7 @@ All layers just related to the neural network.
 
 from __future__ import print_function
 
+import numpy as np
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant
 from ..framework import Variable
@@ -24,7 +25,6 @@ from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc
 from .tensor import concat
 from . import utils
-import random
 from .. import unique_name
 from functools import reduce
 
@@ -5102,7 +5102,7 @@ def random_crop(x, shape, seed=None):
     dtype = x.dtype
     out = helper.create_tmp_variable(dtype)
     if seed is None:
-        seed = random.randint(-65536, 65535)
+        seed = np.random.randint(-65536, 65536)
     op_attrs = {"shape": shape}
     if isinstance(seed, int):
         op_attrs["startup_seed"] = seed
@@ -5416,7 +5416,7 @@ def prelu(x, mode, param_attr=None, name=None):
         channel:elements in a channel share same weight
         element:each element has a weight
     name(str|None): A name for this layer(optional). If set None, the layer
-        will be named automatically. 
+        will be named automatically.
 
     Returns:
         Variable: The output tensor with the same shape as input.
@@ -5530,23 +5530,23 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
     Supposing :code:`x` is a Tensor with shape [d_1, d_2, ..., d_n], the
     :code:`y` is a mask with shape [d_1, d_2, ..., d_n, maxlen], where:
-    
+
     .. math::
-    
+
         y(i_1, i_2,..., i_n, j) = (j < x(i_1, i_2,..., i_n))
 
     Args:
-        x (Variable): Input tensor of sequence_mask layer, 
+        x (Variable): Input tensor of sequence_mask layer,
                       whose elements are integers less than :code:`maxlen`.
         maxlen (int|None): Maximum length of the sequence. If :code:`maxlen`
                            is None, it would be replace with :math:`max(x)`.
         dtype (np.dtype|core.VarDesc.VarType|str): Data type of the output.
-        name (str|None): A name for this layer(optional). If set None, the 
-                         layer will be named automatically. 
-    
+        name (str|None): A name for this layer(optional). If set None, the
+                         layer will be named automatically.
+
     Returns:
         Variable: The output sequence mask.
- + """ helper = LayerHelper('sequence_mask', **locals()) @@ -5571,23 +5571,23 @@ def stack(x, axis=0): **Stack Layer** This layer stacks all of the input :code:`x` along axis. - - Input :code:`x` can be a single variable, a :code:`list` of variables, - or a :code:`tuple` of variables. If :code:`x` is a :code:`list` or - :code:`tuple`, the shapes of all these variables must be the same. - Supposing the shape of each input is :math:`[d_0, d_1, ..., d_{n-1}]`, - the shape of the output variable would be - :math:`[d_0, d_1, ..., d_{axis}=len(x), ..., d_{n-1}]`. + + Input :code:`x` can be a single variable, a :code:`list` of variables, + or a :code:`tuple` of variables. If :code:`x` is a :code:`list` or + :code:`tuple`, the shapes of all these variables must be the same. + Supposing the shape of each input is :math:`[d_0, d_1, ..., d_{n-1}]`, + the shape of the output variable would be + :math:`[d_0, d_1, ..., d_{axis}=len(x), ..., d_{n-1}]`. If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x[0])+1`. - If :code:`axis` is None, it would be replaced with 0. + If :code:`axis` is None, it would be replaced with 0. Args: - x (Variable|list(Variable)|tuple(Variable)): Input variables. + x (Variable|list(Variable)|tuple(Variable)): Input variables. axis (int|None): The axis along which all inputs are stacked. - + Returns: Variable: The stacked variable. - + """ helper = LayerHelper('stack', **locals()) diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 80d9758b3d..28ae89acd3 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -31,7 +31,6 @@ Steps to transpile pserver: """ import math -import random import numpy as np import collections import six @@ -239,8 +238,8 @@ class DistributeTranspiler(object): grad_var_mapping_items = list(six.iteritems(self.grad_var_mapping)) if not self.config.slice_var_up: - random.seed(self.origin_program.random_seed) - random.shuffle(grad_var_mapping_items) + np.random.seed(self.origin_program.random_seed) + np.random.shuffle(grad_var_mapping_items) grad_name_to_send_dummy_out = dict() for grad_varname, splited_vars in grad_var_mapping_items: -- GitLab