Commit 312f3b86 authored by minqiyang

Fix random diff between python2 and python3

Parent 3e1050a2
@@ -24,6 +24,7 @@ set and test set into paddle reader creators.
 
 from __future__ import print_function
 
+import numpy as np
 import zipfile
 import paddle.dataset.common
 import re
@@ -150,12 +151,12 @@ def __initialize_meta_info__():
 
 def __reader__(rand_seed=0, test_ratio=0.1, is_test=False):
     fn = __initialize_meta_info__()
-    rand = random.Random(x=rand_seed)
+    np.random.seed(rand_seed)
     with zipfile.ZipFile(file=fn) as package:
         with package.open('ml-1m/ratings.dat') as rating:
             for line in rating:
                 line = cpt.to_text(line, encoding='latin')
-                if (rand.random() < test_ratio) == is_test:
+                if (np.random.random() < test_ratio) == is_test:
                     uid, mov_id, rating, _ = line.strip().split("::")
                     uid = int(uid)
                     mov_id = int(mov_id)
...
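Reviewer note on the hunk above: CPython changed how several `random` methods (`randint`, `shuffle`) draw from the underlying Mersenne Twister between Python 2 and Python 3, so the same seed can yield different results across interpreters; NumPy's legacy `RandomState` draws identically on both, which is presumably why the reader moves to `np.random`. A minimal sketch of the split rule (with a hypothetical record count) showing what this makes reproducible:

```python
import numpy as np

def split_labels(n_records, rand_seed=0, test_ratio=0.1):
    # Mirror the reader loop above: one seeded uniform draw per record,
    # in file order; a record lands in the test set when the draw falls
    # below test_ratio. Same seed => same split on Python 2 and 3.
    np.random.seed(rand_seed)
    return ['test' if np.random.random() < test_ratio else 'train'
            for _ in range(n_records)]

print(split_labels(10))  # deterministic across interpreters
```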
@@ -17,6 +17,7 @@ All layers just related to the neural network.
 
 from __future__ import print_function
 
+import numpy as np
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant
 from ..framework import Variable
@@ -24,7 +25,6 @@ from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc
 from .tensor import concat
 from . import utils
-import random
 from .. import unique_name
 from functools import reduce
@@ -5102,7 +5102,7 @@ def random_crop(x, shape, seed=None):
     dtype = x.dtype
     out = helper.create_tmp_variable(dtype)
     if seed is None:
-        seed = random.randint(-65536, 65535)
+        seed = np.random.randint(-65536, 65536)
     op_attrs = {"shape": shape}
     if isinstance(seed, int):
         op_attrs["startup_seed"] = seed
@@ -5416,7 +5416,7 @@ def prelu(x, mode, param_attr=None, name=None):
             channel: elements in a channel share the same weight
             element: each element has its own weight
         name(str|None): A name for this layer (optional). If set None,
             the layer will be named automatically.
 
     Returns:
         Variable: The output tensor with the same shape as input.
@@ -5530,23 +5530,23 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
     Supposing :code:`x` is a Tensor with shape [d_1, d_2, ..., d_n], the
     :code:`y` is a mask with shape [d_1, d_2, ..., d_n, maxlen], where:
 
     .. math::
 
         y(i_1, i_2,..., i_n, j) = (j < x(i_1, i_2,..., i_n))
 
     Args:
         x (Variable): Input tensor of sequence_mask layer, whose elements
             are integers less than :code:`maxlen`.
         maxlen (int|None): Maximum length of the sequence. If :code:`maxlen`
             is None, it would be replaced with :math:`max(x)`.
         dtype (np.dtype|core.VarDesc.VarType|str): Data type of the output.
         name (str|None): A name for this layer (optional). If set None, the
             layer will be named automatically.
 
     Returns:
         Variable: The output sequence mask.
     """
     helper = LayerHelper('sequence_mask', **locals())
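The mask formula in this docstring is easy to check with a short NumPy sketch (this illustrates the documented semantics only, not the Paddle op itself):

```python
import numpy as np

x = np.array([2, 3, 1])               # lengths, shape [d_1]
maxlen = x.max()                      # maxlen defaults to max(x)
# y[i, j] = (j < x[i]), per the docstring formula
y = (np.arange(maxlen) < x[:, None]).astype('int64')
print(y)
# [[1 1 0]
#  [1 1 1]
#  [1 0 0]]
```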
@@ -5571,23 +5571,23 @@ def stack(x, axis=0):
     **Stack Layer**
 
     This layer stacks all of the input :code:`x` along axis.
 
     Input :code:`x` can be a single variable, a :code:`list` of variables,
     or a :code:`tuple` of variables. If :code:`x` is a :code:`list` or
     :code:`tuple`, the shapes of all these variables must be the same.
     Supposing the shape of each input is :math:`[d_0, d_1, ..., d_{n-1}]`,
     the shape of the output variable would be
     :math:`[d_0, d_1, ..., d_{axis}=len(x), ..., d_{n-1}]`.
 
     If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x[0])+1`.
     If :code:`axis` is None, it would be replaced with 0.
 
     Args:
         x (Variable|list(Variable)|tuple(Variable)): Input variables.
         axis (int|None): The axis along which all inputs are stacked.
 
     Returns:
         Variable: The stacked variable.
     """
     helper = LayerHelper('stack', **locals())
...
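Likewise, the shape rule in the `stack` docstring matches NumPy's `stack`, shown here only as an analogy for the documented semantics:

```python
import numpy as np

a = np.ones([2, 3])
b = np.zeros([2, 3])
# len(x) == 2 is inserted into the shape at position `axis`
print(np.stack([a, b], axis=0).shape)  # (2, 2, 3)
print(np.stack([a, b], axis=2).shape)  # (2, 3, 2)
```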
@@ -31,7 +31,6 @@ Steps to transpile pserver:
 """
 
 import math
-import random
 import numpy as np
 import collections
 import six
@@ -239,8 +238,8 @@ class DistributeTranspiler(object):
         grad_var_mapping_items = list(six.iteritems(self.grad_var_mapping))
 
         if not self.config.slice_var_up:
-            random.seed(self.origin_program.random_seed)
-            random.shuffle(grad_var_mapping_items)
+            np.random.seed(self.origin_program.random_seed)
+            np.random.shuffle(grad_var_mapping_items)
 
         grad_name_to_send_dummy_out = dict()
         for grad_varname, splited_vars in grad_var_mapping_items:
...
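The transpiler change is where the Python 2/3 divergence actually bites: Python 3's `random.shuffle` picks indices via `getrandbits()` while Python 2 used `int(random() * n)`, so the same `random_seed` could give each node a different gradient-variable ordering depending on its interpreter. NumPy shuffles identically on both, which presumably keeps the pserver placement consistent across heterogeneous nodes. A quick way to see the stable behavior:

```python
import numpy as np

items = list(range(10))
np.random.seed(42)
np.random.shuffle(items)  # in-place, deterministic
print(items)              # same permutation on Python 2 and Python 3
```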