Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit efee4267 (unverified)
Authored by yaoxuefeng on Aug 24, 2020; committed by GitHub on Aug 24, 2020.
support generator seed in related kernals test=develop (#26495)
Parent: ae4724cf

Showing 14 changed files with 507 additions and 55 deletions (+507, -55):
paddle/fluid/operators/CMakeLists.txt  (+1, -1)
paddle/fluid/operators/distributed/CMakeLists.txt  (+1, -1)
paddle/fluid/operators/distributed/large_scale_kv.h  (+13, -2)
paddle/fluid/operators/dropout_op.h  (+8, -1)
paddle/fluid/operators/gaussian_random_op.cc  (+17, -9)
paddle/fluid/operators/math/sampler.cc  (+20, -5)
paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc  (+19, -10)
paddle/fluid/operators/randint_op.cc  (+18, -7)
paddle/fluid/operators/randperm_op.h  (+12, -4)
paddle/fluid/operators/sampling_id_op.h  (+4, -1)
paddle/fluid/operators/truncated_gaussian_random_op.cc  (+18, -8)
python/paddle/fluid/generator.py  (+1, -1)
python/paddle/fluid/tests/unittests/test_generate_proposal_labels_op.py  (+27, -1)
python/paddle/fluid/tests/unittests/test_random_seed.py  (+348, -4)
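Every kernel-side change below repeats one pattern: when the global generator has been seeded from Python (is_init_py is true), draw from its shared CPU engine via GetCPUEngine(); otherwise keep the old per-op behaviour of seeding a local engine from the op's "seed" attribute (0 meaning "pick one at random"). What follows is a minimal standalone sketch of that pattern using only the C++ standard library; GlobalGenerator, ManualSeed and SampleUniform are illustrative stand-ins, not Paddle's framework::Generator API.

#include <cstdint>
#include <random>

// Stand-in for framework::Generator: one process-wide engine plus a flag
// recording whether Python has explicitly seeded it (hypothetical names).
struct GlobalGenerator {
  bool is_init_py = false;
  std::mt19937_64 cpu_engine;
  static GlobalGenerator& GetInstance() {
    static GlobalGenerator g;
    return g;
  }
  void ManualSeed(uint64_t seed) {
    cpu_engine.seed(seed);
    is_init_py = true;
  }
};

// Pattern used by the CPU kernels in this commit: prefer the shared, seeded
// engine; otherwise seed a per-op engine from the "seed" attribute.
float SampleUniform(unsigned int op_seed) {
  std::uniform_real_distribution<float> dist(0.0f, 1.0f);
  auto& gen = GlobalGenerator::GetInstance();
  if (gen.is_init_py) {
    return dist(gen.cpu_engine);   // reproducible, shared across ops
  }
  std::minstd_rand engine;
  if (op_seed == 0) op_seed = std::random_device()();
  engine.seed(op_seed);
  return dist(engine);             // legacy per-op behaviour
}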
paddle/fluid/operators/CMakeLists.txt

@@ -123,7 +123,7 @@ cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_t
 cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory)
 cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op)
 cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op)
-nv_test(dropout_op_test SRCS dropout_op_test.cc DEPS dropout_op tensor)
+nv_test(dropout_op_test SRCS dropout_op_test.cc DEPS dropout_op tensor generator)
 if(WITH_GPU)
   nv_test(test_leaky_relu_grad_grad_functor SRCS test_leaky_relu_grad_grad_functor.cc test_leaky_relu_grad_grad_functor.cu DEPS tensor device_context eigen3)
 else()
paddle/fluid/operators/distributed/CMakeLists.txt

@@ -61,7 +61,7 @@ cc_test(varhandle_test SRCS varhandle_test.cc DEPS profiler scope)
 cc_library(parameter_prefetch SRCS parameter_prefetch.cc DEPS sendrecvop_rpc memory)
 cc_library(parameter_send SRCS parameter_send.cc DEPS sendrecvop_rpc memory)
 cc_library(parameter_recv SRCS parameter_recv.cc DEPS sendrecvop_rpc memory)
-cc_library(communicator SRCS communicator.cc DEPS scope selected_rows tensor variable_helper selected_rows_functor simple_threadpool parameter_send parameter_recv)
+cc_library(communicator SRCS communicator.cc DEPS scope selected_rows tensor variable_helper selected_rows_functor simple_threadpool parameter_send parameter_recv generator)
 cc_test(communicator_test SRCS communicator_test.cc DEPS communicator)
 if(WITH_GPU)
   cc_test(collective_server_test SRCS collective_server_test.cc
paddle/fluid/operators/distributed/large_scale_kv.h

@@ -28,6 +28,7 @@
 #include <thread>  // NOLINT

 #include <ThreadPool.h>
+#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/rw_lock.h"
 #include "paddle/fluid/framework/selected_rows.h"

@@ -96,7 +97,12 @@ class UniformInitializer : public Initializer {
     dist_ = std::uniform_real_distribution<float>(min_, max_);
   }

-  float GetValue() override { return dist_(random_engine_); }
+  float GetValue() override {
+    return framework::Generator::GetInstance()->is_init_py
+               ? dist_(framework::Generator::GetInstance()->GetCPUEngine())
+               : dist_(random_engine_);
+    // return dist_(random_engine_);
+  }

  private:
   float min_;

@@ -141,7 +147,12 @@ class GaussianInitializer : public Initializer {
     dist_ = std::normal_distribution<float>(mean_, std_);
   }

-  float GetValue() override { return dist_(random_engine_); }
+  float GetValue() override {
+    return framework::Generator::GetInstance()->is_init_py
+               ? dist_(framework::Generator::GetInstance()->GetCPUEngine())
+               : dist_(random_engine_);
+    // return dist_(random_engine_);
+  }

  private:
   float std_;
paddle/fluid/operators/dropout_op.h

@@ -18,6 +18,7 @@ limitations under the License. */
 #include <string>

 #include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"

 namespace paddle {

@@ -55,6 +56,8 @@ class CPUDropoutKernel : public framework::OpKernel<T> {
         return;
       }

+      bool init_generator_py = framework::Generator::GetInstance()->is_init_py;
+
       // NOTE: fixed seed should only be used in unittest or for debug.
       // Guarantee to use random seed in training.
       std::random_device rnd;

@@ -71,7 +74,11 @@ class CPUDropoutKernel : public framework::OpKernel<T> {
       std::uniform_real_distribution<float> dist(0, 1);

       for (size_t i = 0; i < size; ++i) {
-        if (dist(engine) < dropout_prob) {
+        float cur_random =
+            init_generator_py
+                ? dist(framework::Generator::GetInstance()->GetCPUEngine())
+                : dist(engine);
+        if (cur_random < dropout_prob) {
           mask_data[i] = 0;
           y_data[i] = 0;
         } else {
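As an illustration of the dropout change, here is a minimal standalone version of the mask loop, with the Paddle singleton replaced by a plain seeded std::mt19937_64; global_engine, generator_seeded and dropout_mask are hypothetical names for this sketch, not Paddle API, and the output scaling variants of dropout are omitted.

#include <cstdint>
#include <random>
#include <vector>

std::mt19937_64 global_engine(111111111);  // analogue of the Python-seeded CPU engine
bool generator_seeded = true;              // analogue of is_init_py

void dropout_mask(const std::vector<float>& x, float dropout_prob,
                  std::vector<float>* y, std::vector<uint8_t>* mask) {
  // Per-op fallback engine, used when no Python-side seed has been set.
  std::minstd_rand local_engine(std::random_device{}());
  std::uniform_real_distribution<float> dist(0.0f, 1.0f);
  y->assign(x.size(), 0.0f);
  mask->assign(x.size(), 0);
  for (size_t i = 0; i < x.size(); ++i) {
    // One draw per element, taken from the shared engine when it is seeded.
    float cur_random = generator_seeded ? dist(global_engine) : dist(local_engine);
    if (cur_random < dropout_prob) {
      (*mask)[i] = 0;
      (*y)[i] = 0.0f;
    } else {
      (*mask)[i] = 1;
      (*y)[i] = x[i];
    }
  }
}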
paddle/fluid/operators/gaussian_random_op.cc

@@ -14,6 +14,7 @@ limitations under the License. */
 #include <random>

+#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/fill_constant_op.h"
 #ifdef PADDLE_WITH_MKLDNN

@@ -31,23 +32,30 @@ class CPUGaussianRandomKernel : public framework::OpKernel<T> {
     float mean = context.Attr<float>("mean");
     float std = context.Attr<float>("std");
     auto* tensor = context.Output<framework::Tensor>("Out");
-    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-    std::minstd_rand engine;
-    if (seed == 0) {
-      seed = std::random_device()();
-    }
-    engine.seed(seed);
+
     std::normal_distribution<T> dist(mean, std);
     const std::string op_type = "gaussian_random";
     auto shape = GetShape(context, op_type);
     tensor->Resize(shape);
     int64_t size = tensor->numel();
     T* data = tensor->mutable_data<T>(context.GetPlace());
-    for (int64_t i = 0; i < size; ++i) {
-      data[i] = dist(engine);
+    if (framework::Generator::GetInstance()->is_init_py) {
+      std::mt19937_64& gen_engine =
+          framework::Generator::GetInstance()->GetCPUEngine();
+      for (int64_t i = 0; i < size; ++i) {
+        data[i] = dist(gen_engine);
+      }
+    } else {
+      unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
+      std::minstd_rand engine;
+      if (seed == 0) {
+        seed = std::random_device()();
+      }
+      engine.seed(seed);
+      for (int64_t i = 0; i < size; ++i) {
+        data[i] = dist(engine);
+      }
     }
   }
 };
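The property the unit tests later rely on can be reproduced with any fixed-seed engine: reseeding with the same value replays the same normal samples. A small self-contained check, standard library only, with the seed value borrowed from the tests purely for illustration:

#include <cassert>
#include <random>
#include <vector>

std::vector<float> draw_normal(std::mt19937_64* engine, float mean, float std,
                               int64_t n) {
  std::normal_distribution<float> dist(mean, std);
  std::vector<float> out(n);
  for (int64_t i = 0; i < n; ++i) out[i] = dist(*engine);
  return out;
}

int main() {
  std::mt19937_64 engine(123123143);   // e.g. the value passed to manual_seed
  auto a = draw_normal(&engine, 0.0f, 1.0f, 8);
  engine.seed(123123143);              // reseed: the stream restarts
  auto b = draw_normal(&engine, 0.0f, 1.0f, 8);
  assert(a == b);                      // same seed, same samples
  return 0;
}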
paddle/fluid/operators/math/sampler.cc

@@ -18,6 +18,7 @@ limitations under the License. */
 #include <queue>
 #include <utility>
 #include <vector>
+#include "paddle/fluid/framework/generator.h"

 namespace paddle {
 namespace operators {

@@ -31,7 +32,12 @@ UniformSampler::UniformSampler(int64_t range, unsigned int seed)
   dist_ = std::make_shared<std::uniform_int_distribution<>>(0, range);
 }

-int64_t UniformSampler::Sample() const { return (*dist_)(*random_engine_); }
+int64_t UniformSampler::Sample() const {
+  return framework::Generator::GetInstance()->is_init_py
+             ? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine())
+             : (*dist_)(*random_engine_);
+  // return (*dist_)(*random_engine_);
+}

 float UniformSampler::Probability(int64_t value) const { return inv_range_; }

@@ -46,8 +52,11 @@ int64_t LogUniformSampler::Sample() const {
   // inverse_transform_sampling method
   // More details:
   // https://wanghaoshuang.github.io/2017/11/Log-uniform-distribution-sampler/
-  const int64_t value =
-      static_cast<int64_t>(exp((*dist_)(*random_engine_) * log_range_)) - 1;
+  auto cur_random =
+      framework::Generator::GetInstance()->is_init_py
+          ? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine())
+          : (*dist_)(*random_engine_);
+  const int64_t value = static_cast<int64_t>(exp(cur_random * log_range_)) - 1;
   // Mathematically, value should be <= range_, but might not be due to some
   // floating point roundoff, so we mod by range_.
   return value % range_;

@@ -75,8 +84,14 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities,
 }

 int64_t CustomSampler::Sample() const {
-  auto index = (*int_dist_)(*random_engine_);
-  auto p = (*real_dist_)(*random_engine_);
+  auto index =
+      framework::Generator::GetInstance()->is_init_py
+          ? (*int_dist_)(framework::Generator::GetInstance()->GetCPUEngine())
+          : (*int_dist_)(*random_engine_);
+  auto p =
+      framework::Generator::GetInstance()->is_init_py
+          ? (*real_dist_)(framework::Generator::GetInstance()->GetCPUEngine())
+          : (*real_dist_)(*random_engine_);
   if (p > alias_probs_[index]) {
     int alias = alias_[index];
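The LogUniformSampler hunk keeps the inverse-transform step itself unchanged and only swaps where the uniform draw comes from: value = floor(exp(u * log_range_)) - 1, then value % range_ to absorb floating-point roundoff. A standalone sketch of that transform; log_uniform_sample is a hypothetical helper, and the assumption that log_range equals log(range + 1) follows the reference linked in the source comment rather than anything shown in this diff.

#include <cmath>
#include <cstdint>
#include <random>

// Inverse-transform sampling of a log-uniform distribution over [0, range).
// `engine` plays the role of whichever RNG engine the kernel selected.
int64_t log_uniform_sample(std::mt19937_64* engine, int64_t range,
                           double log_range) {
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  double u = dist(*engine);                                    // u ~ U(0, 1)
  int64_t value = static_cast<int64_t>(std::exp(u * log_range)) - 1;
  // Roundoff can push value up to range, so wrap as the kernel does.
  return value % range;
}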
paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc

@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include <string>

+#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/operators/fill_constant_op.h"
 #include "paddle/fluid/operators/mean_op.h"

@@ -28,21 +29,29 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
     float std = context.Attr<float>("std");
     auto* tensor = context.Output<framework::Tensor>("Out");
-    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-    std::minstd_rand engine;
-    if (seed == 0) {
-      seed = std::random_device()();
-    }
-    engine.seed(seed);
-    std::normal_distribution<T> dist(mean, std);
     const std::string op_type = "gaussian_random";
     auto shape = GetShape(context, op_type);
     tensor->Resize(shape);
     T* data = tensor->mutable_data<T>(context.GetPlace());
     int64_t size = tensor->numel();
-    for (int64_t i = 0; i < size; ++i) {
-      data[i] = dist(engine);
+    std::normal_distribution<T> dist(mean, std);
+    if (framework::Generator::GetInstance()->is_init_py) {
+      std::mt19937_64& gen_engine =
+          framework::Generator::GetInstance()->GetCPUEngine();
+      for (int64_t i = 0; i < size; ++i) {
+        data[i] = dist(gen_engine);
+      }
+    } else {
+      unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
+      std::minstd_rand engine;
+      if (seed == 0) {
+        seed = std::random_device()();
+      }
+      engine.seed(seed);
+      for (int64_t i = 0; i < size; ++i) {
+        data[i] = dist(engine);
+      }
     }

     tensor->set_layout(DataLayout::kMKLDNN);
paddle/fluid/operators/randint_op.cc

@@ -15,6 +15,7 @@
 #include <string>
 #include <vector>

+#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/uniform_random_op.h"

@@ -43,15 +44,25 @@ class CPURandintKernel : public framework::OpKernel<T> {
     T* data = out->mutable_data<T>(ctx.GetPlace());

     int64_t size = out->numel();
-    unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
-    std::minstd_rand engine;
-    if (seed == 0) {
-      seed = std::random_device()();
-    }
-    engine.seed(seed);
     std::uniform_int_distribution<T> dist(ctx.Attr<int>("low"),
                                           ctx.Attr<int>("high") - 1);
-    for (int64_t i = 0; i < size; ++i) data[i] = dist(engine);
+    if (framework::Generator::GetInstance()->is_init_py) {
+      std::mt19937_64& gen_engine =
+          framework::Generator::GetInstance()->GetCPUEngine();
+      for (int64_t i = 0; i < size; ++i) data[i] = dist(gen_engine);
+    } else {
+      unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
+      std::minstd_rand engine;
+      if (seed == 0) {
+        seed = std::random_device()();
+      }
+      engine.seed(seed);
+      for (int64_t i = 0; i < size; ++i) {
+        data[i] = dist(engine);
+      }
+    }
   }
 };
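One detail worth noting in this kernel: std::uniform_int_distribution is inclusive on both ends, so constructing it with (low, high - 1) makes high exclusive, which matches paddle.randint's semantics. A tiny sketch of the fill loop in isolation; fill_randint is a hypothetical helper name.

#include <random>
#include <vector>

// Fill `data` with integers in [low, high); `high - 1` because the
// distribution's bounds are inclusive (mirrors the kernel above).
void fill_randint(std::mt19937_64* engine, int low, int high,
                  std::vector<int>* data) {
  std::uniform_int_distribution<int> dist(low, high - 1);
  for (auto& v : *data) v = dist(*engine);
}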
paddle/fluid/operators/randperm_op.h

@@ -19,6 +19,7 @@ limitations under the License. */
 #include <ctime>
 #include <string>
 #include <vector>

+#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/platform/place.h"

@@ -31,11 +32,17 @@ static inline void random_permate(T* data_ptr, int num, unsigned int seed) {
   for (int i = 0; i < num; ++i) {
     data_ptr[i] = static_cast<T>(i);
   }
-  if (seed == 0) {
-    seed = std::random_device()();
+  if (framework::Generator::GetInstance()->is_init_py) {
+    std::shuffle(data_ptr, data_ptr + num,
+                 framework::Generator::GetInstance()->GetCPUEngine());
+  } else {
+    if (seed == 0) {
+      seed = std::random_device()();
+    }
+    std::srand(seed);
+    std::random_shuffle(data_ptr, data_ptr + num);
   }
-  std::srand(seed);
-  std::random_shuffle(data_ptr, data_ptr + num);
 }

 template <typename DeviceContext, typename T>

@@ -51,6 +58,7 @@ class RandpermKernel : public framework::OpKernel<T> {
     if (platform::is_cpu_place(ctx.GetPlace())) {
       T* out_data = out_tensor->mutable_data<T>(platform::CPUPlace());
       random_permate<T>(out_data, n, seed);
     } else {
       framework::Tensor tmp_tensor;
       tmp_tensor.Resize(framework::make_ddim({n}));
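The randperm change also swaps the shuffling primitive: with a seeded global engine it uses std::shuffle, while only the fallback path keeps the older std::srand plus std::random_shuffle pair (which draws from the C rand() stream and was removed in C++17). A standalone sketch of the two paths; random_permate_sketch and the globals are hypothetical names, and the fallback here substitutes a seeded engine for std::random_shuffle so the sketch builds on current compilers.

#include <algorithm>
#include <numeric>
#include <random>
#include <vector>

std::mt19937_64 global_engine(12312321111ULL);  // analogue of the CPU engine
bool generator_seeded = true;                   // analogue of is_init_py

std::vector<int> random_permate_sketch(int num, unsigned int seed) {
  std::vector<int> data(num);
  std::iota(data.begin(), data.end(), 0);       // 0, 1, ..., num - 1
  if (generator_seeded) {
    // Reproducible path: std::shuffle driven by the shared, seeded engine.
    std::shuffle(data.begin(), data.end(), global_engine);
  } else {
    // Fallback path; the kernel itself keeps std::srand + std::random_shuffle.
    if (seed == 0) seed = std::random_device()();
    std::minstd_rand engine(seed);
    std::shuffle(data.begin(), data.end(), engine);
  }
  return data;
}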
paddle/fluid/operators/sampling_id_op.h

@@ -21,6 +21,7 @@
 #include <sstream>
 #include <vector>

+#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"

 namespace paddle {

@@ -61,7 +62,9 @@ class SamplingIdKernel : public framework::OpKernel<T> {
     std::vector<int64_t> ids(batch_size);
     for (int i = 0; i < batch_size; ++i) {
-      T r = dist(engine);
+      T r = framework::Generator::GetInstance()->is_init_py
+                ? dist(framework::Generator::GetInstance()->GetCPUEngine())
+                : dist(engine);
       int idx = width - 1;
       for (int j = 0; j < width; ++j) {
         if ((r -= ins_vector[i * width + j]) < 0) {
paddle/fluid/operators/truncated_gaussian_random_op.cc

@@ -14,6 +14,7 @@ limitations under the License. */
 #include <limits>
 #include <random>

+#include "paddle/fluid/framework/generator.h"
 #include "paddle/fluid/framework/op_registry.h"

 namespace paddle {

@@ -161,18 +162,27 @@ class CPUTruncatedGaussianRandomKernel : public framework::OpKernel<T> {
     auto* tensor = context.Output<framework::Tensor>("Out");
     T* data = tensor->mutable_data<T>(context.GetPlace());

-    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
-    std::minstd_rand engine;
-    if (seed == 0) {
-      seed = std::random_device()();
-    }
-    engine.seed(seed);
     std::uniform_real_distribution<T> dist(std::numeric_limits<float>::min(),
                                            1.0);
     TruncatedNormal<T> truncated_normal(mean, std);
     int64_t size = tensor->numel();
-    for (int64_t i = 0; i < size; ++i) {
-      data[i] = truncated_normal(dist(engine));
+    if (framework::Generator::GetInstance()->is_init_py) {
+      std::mt19937_64& gen_engine =
+          framework::Generator::GetInstance()->GetCPUEngine();
+      for (int64_t i = 0; i < size; ++i) {
+        data[i] = truncated_normal(dist(gen_engine));
+      }
+    } else {
+      unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
+      std::minstd_rand engine;
+      if (seed == 0) {
+        seed = std::random_device()();
+      }
+      engine.seed(seed);
+      for (int64_t i = 0; i < size; ++i) {
+        data[i] = truncated_normal(dist(engine));
+      }
     }
   }
 };
python/paddle/fluid/generator.py

@@ -29,7 +29,7 @@ class Generator(object):
         seed_in = default_rng_seed_val
         if self.device == "CPU":
             self.generator = core.Generator()
-            self.generator.manual_seed(seed_in)
+            # self.generator.manual_seed(seed_in)
         else:
             raise ValueError(
                 "generator class with device %s does not exist, currently only support generator with device 'CPU' "
python/paddle/fluid/tests/unittests/test_generate_proposal_labels_op.py

@@ -224,7 +224,8 @@ def _expand_bbox_targets(bbox_targets_input, class_nums, is_cls_agnostic):
 class TestGenerateProposalLabelsOp(OpTest):
     def set_data(self):
-        self.use_random = False
+        #self.use_random = False
+        self.init_use_random()
         self.init_test_cascade()
         self.init_test_params()
         self.init_test_input()

@@ -267,6 +268,9 @@ class TestGenerateProposalLabelsOp(OpTest):
     def init_test_cascade(self, ):
         self.is_cascade_rcnn = False

+    def init_use_random(self):
+        self.use_random = False
+
     def init_test_params(self):
         self.batch_size_per_im = 512
         self.fg_fraction = 0.25

@@ -329,6 +333,28 @@ class TestCascade(TestGenerateProposalLabelsOp):
         self.is_cascade_rcnn = True


+class TestUseRandom(TestGenerateProposalLabelsOp):
+    def init_use_random(self):
+        self.use_random = True
+        self.is_cascade_rcnn = False
+
+    def test_check_output(self):
+        self.check_output_customized(self.verify_out)
+
+    def verify_out(self, outs):
+        print("skip")
+
+    def init_test_params(self):
+        self.batch_size_per_im = 512
+        self.fg_fraction = 0.025
+        self.fg_thresh = 0.5
+        self.bg_thresh_hi = 0.5
+        self.bg_thresh_lo = 0.0
+        self.bbox_reg_weights = [0.1, 0.1, 0.2, 0.2]
+        self.is_cls_agnostic = False
+        self.class_nums = 2 if self.is_cls_agnostic else 81
+
+
 class TestClsAgnostic(TestCascade):
     def init_test_params(self):
         self.batch_size_per_im = 512
python/paddle/fluid/tests/unittests/test_random_seed.py

@@ -92,6 +92,118 @@ class TestGeneratorSeed(unittest.TestCase):
             self.assertTrue(np.allclose(out1_res2, out2_res2))
             self.assertTrue(not np.allclose(out1_res2, out1_res1))

+    def test_gen_dropout_dygraph(self):
+        gen = generator.Generator()
+
+        fluid.enable_dygraph()
+
+        gen.manual_seed(111111111)
+        st = gen.get_state()
+        # x = np.arange(1,101).reshape(2,50).astype("float32")
+        x = fluid.layers.uniform_random(
+            [2, 10], dtype="float32", min=0.0, max=1.0)
+        y = fluid.layers.dropout(x, 0.5)
+        gen.manual_seed(111111111)
+        #gen.set_state(st)
+        x1 = fluid.layers.uniform_random(
+            [2, 10], dtype="float32", min=0.0, max=1.0)
+        y1 = fluid.layers.dropout(x1, 0.5)
+        y_np = y.numpy()
+        y1_np = y1.numpy()
+        #print(y_np)
+        #print(y1_np)
+        if not core.is_compiled_with_cuda():
+            print(">>>>>>> dropout dygraph >>>>>>>")
+            self.assertTrue(np.allclose(y_np, y1_np))
+
+    def test_gen_dropout_static(self):
+        fluid.disable_dygraph()
+
+        gen = generator.Generator()
+        gen.manual_seed(123123143)
+
+        startup_program = fluid.Program()
+        train_program = fluid.Program()
+        with fluid.program_guard(train_program, startup_program):
+            # example 1:
+            # attr shape is a list which doesn't contain tensor Variable.
+            x_1 = fluid.layers.uniform_random(shape=[2, 10])
+            y_1 = fluid.layers.dropout(x_1, 0.5)
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(startup_program)
+            out1 = exe.run(train_program, feed={}, fetch_list=[y_1])
+            #gen.set_state(cur_state)
+            gen.manual_seed(123123143)
+            out2 = exe.run(train_program, feed={}, fetch_list=[y_1])
+        out1_np = np.array(out1[0])
+        out2_np = np.array(out2[0])
+        # print(out1_np)
+        # print(out2_np)
+        if not core.is_compiled_with_cuda():
+            print(">>>>>>> dropout static >>>>>>>")
+            self.assertTrue(np.allclose(out1_np, out2_np))
+
+    def test_generator_gaussian_random_dygraph(self):
+        """Test Generator seed."""
+        gen = generator.Generator()
+
+        fluid.enable_dygraph()
+
+        gen.manual_seed(12312321111)
+        x = fluid.layers.gaussian_random([10], dtype="float32")
+        st1 = gen.get_state()
+        x1 = fluid.layers.gaussian_random([10], dtype="float32")
+        gen.set_state(st1)
+        x2 = fluid.layers.gaussian_random([10], dtype="float32")
+        gen.manual_seed(12312321111)
+        x3 = fluid.layers.gaussian_random([10], dtype="float32")
+        x_np = x.numpy()
+        x1_np = x1.numpy()
+        x2_np = x2.numpy()
+        x3_np = x3.numpy()
+
+        if not core.is_compiled_with_cuda():
+            print(">>>>>>> gaussian random dygraph >>>>>>>")
+            self.assertTrue(np.allclose(x1_np, x2_np))
+            self.assertTrue(np.allclose(x_np, x3_np))
+
+    def test_generator_gaussian_random_static(self):
+        fluid.disable_dygraph()
+
+        gen = generator.Generator()
+        gen.manual_seed(123123143)
+
+        startup_program = fluid.Program()
+        train_program = fluid.Program()
+        with fluid.program_guard(train_program, startup_program):
+            # example 1:
+            # attr shape is a list which doesn't contain tensor Variable.
+            result_1 = fluid.layers.gaussian_random(shape=[3, 4])
+            result_2 = fluid.layers.gaussian_random(shape=[3, 4])
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(startup_program)
+            out1 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+            #gen.set_state(cur_state)
+            gen.manual_seed(123123143)
+            out2 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+            out1_res1 = np.array(out1[0])
+            out1_res2 = np.array(out1[1])
+            out2_res1 = np.array(out2[0])
+            out2_res2 = np.array(out2[1])
+
+            if not core.is_compiled_with_cuda():
+                print(">>>>>>> gaussian random static >>>>>>>")
+                self.assertTrue(np.allclose(out1_res1, out2_res1))
+                self.assertTrue(np.allclose(out1_res2, out2_res2))
+                self.assertTrue(not np.allclose(out1_res2, out1_res1))
+
     def test_generator_randint_dygraph(self):
         """Test Generator seed."""
         gen = generator.Generator()

@@ -99,21 +211,253 @@ class TestGeneratorSeed(unittest.TestCase):
         fluid.enable_dygraph()

         gen.manual_seed(12312321111)
-        x = paddle.randint(low=1)
+        x = paddle.randint(low=10, shape=[10], dtype="int32")
         st1 = gen.get_state()
-        x1 = paddle.randint(low=1)
+        x1 = paddle.randint(low=10, shape=[10], dtype="int32")
         gen.set_state(st1)
-        x2 = paddle.randint(low=1)
+        x2 = paddle.randint(low=10, shape=[10], dtype="int32")
         gen.manual_seed(12312321111)
-        x3 = paddle.randint(low=1)
+        x3 = paddle.randint(low=10, shape=[10], dtype="int32")
         x_np = x.numpy()
         x1_np = x1.numpy()
         x2_np = x2.numpy()
         x3_np = x3.numpy()

         if not core.is_compiled_with_cuda():
             print(">>>>>>> randint dygraph >>>>>>>")
             self.assertTrue(np.allclose(x1_np, x2_np))
             self.assertTrue(np.allclose(x_np, x3_np))

+    def test_generator_ranint_static(self):
+        fluid.disable_dygraph()
+
+        gen = generator.Generator()
+        gen.manual_seed(123123143)
+
+        startup_program = fluid.Program()
+        train_program = fluid.Program()
+        with fluid.program_guard(train_program, startup_program):
+            # example 1:
+            # attr shape is a list which doesn't contain tensor Variable.
+            result_1 = paddle.randint(low=10, shape=[3, 4])
+            result_2 = paddle.randint(low=10, shape=[3, 4])
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(startup_program)
+            out1 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+            #gen.set_state(cur_state)
+            gen.manual_seed(123123143)
+            out2 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+            out1_res1 = np.array(out1[0])
+            out1_res2 = np.array(out1[1])
+            out2_res1 = np.array(out2[0])
+            out2_res2 = np.array(out2[1])
+
+            if not core.is_compiled_with_cuda():
+                print(">>>>>>> randint static >>>>>>>")
+                self.assertTrue(np.allclose(out1_res1, out2_res1))
+                self.assertTrue(np.allclose(out1_res2, out2_res2))
+                self.assertTrue(not np.allclose(out1_res2, out1_res1))
+
+    def test_generator_randperm_dygraph(self):
+        """Test Generator seed."""
+        gen = generator.Generator()
+
+        fluid.enable_dygraph()
+
+        gen.manual_seed(12312321111)
+        x = paddle.randperm(10)
+        st1 = gen.get_state()
+        x1 = paddle.randperm(10)
+        gen.set_state(st1)
+        x2 = paddle.randperm(10)
+        gen.manual_seed(12312321111)
+        x3 = paddle.randperm(10)
+        x_np = x.numpy()
+        x1_np = x1.numpy()
+        x2_np = x2.numpy()
+        x3_np = x3.numpy()
+
+        # print("## {}".format(x1_np))
+        # print("## {}".format(x2_np))
+
+        if not core.is_compiled_with_cuda():
+            print(">>>>>>> randperm dygraph >>>>>>>")
+            self.assertTrue(np.allclose(x1_np, x2_np))
+            self.assertTrue(np.allclose(x_np, x3_np))
+
+    def test_generator_randperm_static(self):
+        fluid.disable_dygraph()
+
+        gen = generator.Generator()
+        gen.manual_seed(123123143)
+
+        startup_program = fluid.Program()
+        train_program = fluid.Program()
+        with fluid.program_guard(train_program, startup_program):
+            # example 1:
+            # attr shape is a list which doesn't contain tensor Variable.
+            result_1 = paddle.randperm(10)
+            result_2 = paddle.randperm(10)
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(startup_program)
+            out1 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+            #gen.set_state(cur_state)
+            gen.manual_seed(123123143)
+            out2 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+            out1_res1 = np.array(out1[0])
+            out1_res2 = np.array(out1[1])
+            out2_res1 = np.array(out2[0])
+            out2_res2 = np.array(out2[1])
+
+            if not core.is_compiled_with_cuda():
+                print(">>>>>>> randperm static >>>>>>>")
+                self.assertTrue(np.allclose(out1_res1, out2_res1))
+                self.assertTrue(np.allclose(out1_res2, out2_res2))
+                self.assertTrue(not np.allclose(out1_res2, out1_res1))
+
+    def test_generator_sampling_id_dygraph(self):
+        """Test Generator seed."""
+        gen = generator.Generator()
+
+        fluid.enable_dygraph()
+
+        gen.manual_seed(12312321111)
+        x = fluid.layers.uniform_random(
+            [10, 10], dtype="float32", min=0.0, max=1.0)
+        y = fluid.layers.sampling_id(x)
+        st1 = gen.get_state()
+        x1 = fluid.layers.uniform_random(
+            [10, 10], dtype="float32", min=0.0, max=1.0)
+        y1 = fluid.layers.sampling_id(x)
+        gen.set_state(st1)
+        x2 = fluid.layers.uniform_random(
+            [10, 10], dtype="float32", min=0.0, max=1.0)
+        y2 = fluid.layers.sampling_id(x)
+        gen.manual_seed(12312321111)
+        x3 = fluid.layers.uniform_random(
+            [10, 10], dtype="float32", min=0.0, max=1.0)
+        y3 = fluid.layers.sampling_id(x)
+
+        x_np = y.numpy()
+        x1_np = y1.numpy()
+        x2_np = y2.numpy()
+        x3_np = y3.numpy()
+
+        print("## {}".format(x1_np))
+        print("## {}".format(x2_np))
+
+        if not core.is_compiled_with_cuda():
+            print(">>>>>>> sampling id dygraph >>>>>>>")
+            self.assertTrue(np.allclose(x1_np, x2_np))
+            self.assertTrue(np.allclose(x_np, x3_np))
+
+    def test_generator_randperm_static(self):
+        fluid.disable_dygraph()
+
+        gen = generator.Generator()
+        gen.manual_seed(123123143)
+
+        startup_program = fluid.Program()
+        train_program = fluid.Program()
+        with fluid.program_guard(train_program, startup_program):
+            # example 1:
+            # attr shape is a list which doesn't contain tensor Variable.
+            x = fluid.layers.uniform_random(shape=[10, 10])
+            result_1 = fluid.layers.sampling_id(x)
+            result_2 = fluid.layers.sampling_id(x)
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(startup_program)
+            out1 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+            #gen.set_state(cur_state)
+            gen.manual_seed(123123143)
+            out2 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+            out1_res1 = np.array(out1[0])
+            out1_res2 = np.array(out1[1])
+            out2_res1 = np.array(out2[0])
+            out2_res2 = np.array(out2[1])
+
+            if not core.is_compiled_with_cuda():
+                print(">>>>>>> sampling id static >>>>>>>")
+                self.assertTrue(np.allclose(out1_res1, out2_res1))
+                self.assertTrue(np.allclose(out1_res2, out2_res2))
+                self.assertTrue(not np.allclose(out1_res2, out1_res1))
+
+    def test_gen_TruncatedNormal_initializer(self):
+        fluid.disable_dygraph()
+
+        gen = generator.Generator()
+        gen.manual_seed(123123143)
+        cur_state = gen.get_state()
+
+        startup_program = fluid.Program()
+        train_program = fluid.Program()
+        with fluid.program_guard(train_program, startup_program):
+            # example 1:
+            # attr shape is a list which doesn't contain tensor Variable.
+            x = fluid.layers.uniform_random(shape=[2, 10])
+            result_1 = fluid.layers.fc(
+                input=x,
+                size=10,
+                param_attr=fluid.initializer.TruncatedNormal(
+                    loc=0.0, scale=2.0))
+            result_2 = fluid.layers.fc(
+                input=x,
+                size=10,
+                param_attr=fluid.initializer.TruncatedNormal(
+                    loc=0.0, scale=2.0))
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(startup_program)
+            out1 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+        #gen.set_state(cur_state)
+        gen.manual_seed(123123143)
+        with fluid.program_guard(train_program, startup_program):
+            exe.run(startup_program)
+            out2 = exe.run(train_program, feed={},
+                           fetch_list=[result_1, result_2])
+
+        out1_res1 = np.array(out1[0])
+        out1_res2 = np.array(out1[1])
+        out2_res1 = np.array(out2[0])
+        out2_res2 = np.array(out2[1])
+
+        print(out1_res1)
+        print(out1_res2)
+        print(out2_res1)
+        print(out2_res2)
+
+        if not core.is_compiled_with_cuda():
+            print(">>>>>>> sampling id static >>>>>>>")
+            self.assertTrue(np.allclose(out1_res1, out2_res1))
+            self.assertTrue(np.allclose(out1_res2, out2_res2))
+            self.assertTrue(not np.allclose(out1_res2, out1_res1))
+

 if __name__ == "__main__":
     unittest.main()
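The new tests exercise two equivalent ways of replaying a random stream: calling manual_seed again with the same value, and round-tripping get_state/set_state. The same idea in a self-contained C++ form, using the fact that std::mt19937_64 can be copied and reassigned to snapshot and restore its state; the names and seed values are illustrative only.

#include <cassert>
#include <random>
#include <vector>

std::vector<double> draw(std::mt19937_64* eng, int n) {
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  std::vector<double> out(n);
  for (auto& v : out) v = dist(*eng);
  return out;
}

int main() {
  std::mt19937_64 eng(12312321111ULL);   // manual_seed analogue
  auto x = draw(&eng, 10);

  std::mt19937_64 saved = eng;           // get_state analogue: copy the engine
  auto x1 = draw(&eng, 10);

  eng = saved;                           // set_state analogue: restore the copy
  auto x2 = draw(&eng, 10);

  eng.seed(12312321111ULL);              // reseeding replays from the start
  auto x3 = draw(&eng, 10);

  assert(x1 == x2);                      // same state leads to the same draws
  assert(x == x3);                       // same seed leads to the same first draws
  return 0;
}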