Unverified commit 844583c8 authored by Leo Chen, committed by GitHub

Refine paddle.manual_seed (#26496)

* refine manual seed

* fix ci problem

* fix unittests

* fix unittest

* set is_init_py=false in manual_seed

* fix unittest

* fix bernoulli_op

* fix(unittest): change random_seed to manual_seed

* 🐞fix(unittest): fix manual_seed

* trigger ci

* fix test_sentiment

* fix test_imperative_save_load

* fix test_uniform_random_op

* fix test_uniform_random_op

* fix test_jit_save_load

* merge develop

* fix manual_seed

* fix manual_seed

* use global engine

* use shared_ptr

* fix double free

* fix bug

* fix bug

* fix bug

* fix test bug

* fix test bug

* fix test bug

* fix ci
Parent 31f422ae
......@@ -12,67 +12,122 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/generator.h"
#include <glog/logging.h>
#include <deque>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "paddle/fluid/framework/generator.h"
namespace paddle {
namespace framework {
std::shared_ptr<Generator> Generator::gen_instance_ = NULL;
const std::shared_ptr<Generator>& DefaultCPUGenerator() {
static auto default_cpu_generator =
std::make_shared<Generator>(GetRandomSeed());
VLOG(4) << "initial seed: " << default_cpu_generator->GetCurrentSeed()
<< ", cpu engine: " << default_cpu_generator->GetCPUEngine().get();
return default_cpu_generator;
}
std::shared_ptr<std::mt19937_64> OpDefaultCPUEngine() {
static auto op_default_cpu_engine = std::make_shared<std::mt19937_64>();
return op_default_cpu_engine;
}
// NOTE(zhiqiu): there are 3 conditions:
// (1) op seed is not set and DefaultCPUGenerator is initialized: use
// DefaultCPUGenerator
// (2) op seed is not set and DefaultCPUGenerator is not initialized: use
// OpDefaultCPUEngine() and set a random seed
// (3) op seed is set: use OpDefaultCPUEngine() and set the given seed
std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t seed) {
if (DefaultCPUGenerator()->GetIsInitPy() && seed == 0) {
VLOG(4) << "Use random engine from generator";
return DefaultCPUGenerator()->GetCPUEngine();
} else {
// NOTE(zhiqiu): creating an engine instance every time instead of using
// OpDefaultCPUEngine(); this is the legacy behavior of random operators.
// The benefit is that when running PE with a fixed seed in multiple threads,
// each thread has its own engine, and they do not affect each other.
//
// We still need to verify the determinism of Generator in PE.
auto engine = std::make_shared<std::mt19937_64>();
if (seed == 0) {
seed = GetRandomSeed();
VLOG(4) << "Use default random engine with random seed = " << seed;
} else {
VLOG(4) << "Use default random engine with fixed random seed = " << seed;
}
static std::mutex mu_;
{
std::lock_guard<std::mutex> lock(mu_);
engine->seed(seed);
}
return engine;
}
}
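Taken together, GetCPURandomEngine() either hands back the one globally seeded engine (after Python calls manual_seed) or builds a freshly seeded engine per call. Below is a standalone sketch of that dispatch using only the standard library; global_engine, global_seed_set, and GetEngine are illustrative names for this sketch, not Paddle symbols, and it is not part of this commit.

#include <cstdint>
#include <cstdio>
#include <memory>
#include <random>

static auto global_engine = std::make_shared<std::mt19937_64>();
static bool global_seed_set = false;  // stands in for GetIsInitPy()

std::shared_ptr<std::mt19937_64> GetEngine(uint64_t op_seed) {
  if (global_seed_set && op_seed == 0) {
    // Condition (1): share the global engine, one stream for all ops.
    return global_engine;
  }
  // Conditions (2) and (3): a fresh engine per call, so fixed-seed runs
  // in multiple threads do not consume each other's stream.
  auto engine = std::make_shared<std::mt19937_64>();
  if (op_seed == 0) {
    std::random_device rd;
    op_seed = ((static_cast<uint64_t>(rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF;
  }
  engine->seed(op_seed);
  return engine;
}

int main() {
  auto engine = GetEngine(/*op_seed=*/42);
  std::printf("%llu\n", static_cast<unsigned long long>((*engine)()));
  return 0;
}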
GeneratorState* Generator::GetState() {
std::lock_guard<std::mutex> lock(this->mutex);
return this->state_.get();
GeneratorState Generator::GetState() {
std::lock_guard<std::mutex> lock(this->mu_);
state_.cpu_engine = *engine_;
return this->state_;
}
void Generator::SetState(GeneratorState* state_in) {
std::lock_guard<std::mutex> lock(this->mutex);
*this->state_ = *state_in;
void Generator::SetState(const GeneratorState& state) {
std::lock_guard<std::mutex> lock(this->mu_);
this->state_ = state;
this->engine_ = std::make_shared<std::mt19937_64>(state.cpu_engine);
}
uint64_t Generator::GetCurrentSeed() {
std::lock_guard<std::mutex> lock(this->mutex);
return this->state_->current_seed;
std::lock_guard<std::mutex> lock(this->mu_);
return this->state_.current_seed;
}
uint64_t Generator::Seed() {
std::lock_guard<std::mutex> lock(this->mutex);
std::lock_guard<std::mutex> lock(this->mu_);
uint64_t seed;
std::random_device de;
seed = ((((uint64_t)de()) << 32) + de()) & 0x1FFFFFFFFFFFFF;
this->state_->current_seed = seed;
this->state_.current_seed = seed;
std::seed_seq seq({seed});
this->state_->cpu_engine.seed(seq);
this->engine_->seed(seq);
return this->state_->current_seed;
return this->state_.current_seed;
}
void Generator::SetCurrentSeed(uint64_t seed) {
std::lock_guard<std::mutex> lock(this->mutex);
this->state_->current_seed = uint64_t(seed);
std::lock_guard<std::mutex> lock(this->mu_);
this->state_.current_seed = seed;
std::seed_seq seq({seed});
this->state_->cpu_engine.seed(seq);
this->engine_->seed(seq);
}
std::mt19937_64& Generator::GetCPUEngine() {
std::lock_guard<std::mutex> lock(this->mutex);
return this->state_->cpu_engine;
std::shared_ptr<std::mt19937_64> Generator::GetCPUEngine() {
std::lock_guard<std::mutex> lock(this->mu_);
return this->engine_;
}
void Generator::SetCPUEngine(std::mt19937_64 engine) {
std::lock_guard<std::mutex> lock(this->mutex);
this->state_->cpu_engine = std::mt19937_64(engine);
void Generator::SetCPUEngine(std::shared_ptr<std::mt19937_64> engine) {
std::lock_guard<std::mutex> lock(this->mu_);
this->engine_ = engine;
}
uint64_t Generator::Random64() {
std::lock_guard<std::mutex> lock(this->mutex);
return this->state_->cpu_engine();
std::lock_guard<std::mutex> lock(this->mu_);
auto engine = this->engine_;
return (*engine)();
}
void Generator::SetIsInitPy(bool is_init_py) {
this->is_init_py_ = is_init_py;
VLOG(4) << "SetIsInitPy:" << this->is_init_py_;
}
bool Generator::GetIsInitPy() const { return this->is_init_py_; }
} // namespace framework
} // namespace paddle
......@@ -14,7 +14,9 @@ limitations under the License. */
#pragma once
#include <glog/logging.h>
#include <stdint.h>
#include <atomic>
#include <deque>
#include <iostream> // temp for debug
......@@ -27,6 +29,12 @@ limitations under the License. */
namespace paddle {
namespace framework {
static uint64_t GetRandomSeed() {
std::random_device rd;
// double has a 53-bit significand, so limit the uint64 seed to 53 bits
return ((((uint64_t)rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF;
}
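The mask 0x1FFFFFFFFFFFFF keeps the seed within 53 bits so it stays exactly representable as an IEEE-754 double (53-bit significand), presumably so the seed survives a round trip through Python floats. A minimal standalone check of that property (illustrative, not part of this commit):

#include <cstdint>
#include <cstdio>
#include <random>

int main() {
  std::random_device rd;
  // Same construction as GetRandomSeed(): two 32-bit draws, masked to 53 bits.
  uint64_t seed =
      ((static_cast<uint64_t>(rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF;
  // Every integer below 2^53 converts to double and back without loss.
  double d = static_cast<double>(seed);
  std::printf("seed = %llu, lossless: %d\n",
              static_cast<unsigned long long>(seed),
              static_cast<uint64_t>(d) == seed);
  return 0;
}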
struct GeneratorState {
int64_t device = -1;
uint64_t current_seed = 34342423252;
......@@ -35,62 +43,67 @@ struct GeneratorState {
struct Generator {
Generator() {
GeneratorState default_gen_state_cpu;
default_gen_state_cpu.device = -1;
default_gen_state_cpu.current_seed = 34342423252;
std::seed_seq seq({34342423252});
default_gen_state_cpu.cpu_engine = std::mt19937_64(seq);
this->state_ = std::make_shared<GeneratorState>(default_gen_state_cpu);
auto seed = GetRandomSeed();
std::seed_seq seq({seed});
auto engine = std::make_shared<std::mt19937_64>(seq);
this->state_.cpu_engine = *engine;
this->state_.device = -1;
this->state_.current_seed = seed;
this->engine_ = engine;
VLOG(4) << "initial seed: " << this->state_.current_seed
<< ", cpu engine: " << &this->state_.cpu_engine;
}
explicit Generator(uint64_t seed) {
std::seed_seq seq({seed});
auto engine = std::make_shared<std::mt19937_64>(seq);
this->state_.cpu_engine = *engine;
this->state_.device = -1;
this->state_.current_seed = seed;
this->engine_ = engine;
VLOG(4) << "initial seed: " << this->state_.current_seed
<< ", cpu engine: " << &this->state_.cpu_engine;
this->is_init_py_ = true; // TODO(zhiqiu): remove it in future
}
explicit Generator(GeneratorState state_in)
: state_{std::make_shared<GeneratorState>(state_in)} {}
Generator(const Generator& other)
: Generator(other, std::lock_guard<std::mutex>(other.mutex)) {}
Generator(const Generator& other) = delete;
// get random state
GeneratorState* GetState();
GeneratorState GetState();
// set random state
void SetState(GeneratorState* state_in);
void SetState(const GeneratorState&);
// get current seed
uint64_t GetCurrentSeed();
// random a seed and get
uint64_t Seed();
// set seed
void SetCurrentSeed(uint64_t seed);
// get cpu engine
std::mt19937_64& GetCPUEngine();
std::shared_ptr<std::mt19937_64> GetCPUEngine();
// set cpu engine
void SetCPUEngine(std::mt19937_64 engine);
void SetCPUEngine(std::shared_ptr<std::mt19937_64>);
uint64_t Random64();
bool is_init_py = false;
void SetIsInitPy(bool);
bool GetIsInitPy() const;
// CPU Generator singleton
static std::shared_ptr<Generator> GetInstance() {
if (NULL == gen_instance_) {
gen_instance_.reset(new paddle::framework::Generator());
}
return gen_instance_;
}
private:
GeneratorState state_;
std::shared_ptr<std::mt19937_64> engine_;
mutable std::mutex mu_;
// NOTE(zhiqiu): is_init_py_ is used to keep the generator compatible with
// the old seeding behavior, and it should be removed after all random-related
// operators and unittests are upgraded to use the generator.
bool is_init_py_ = false;
};
static std::shared_ptr<Generator> GetInstanceX() {
if (NULL == gen_instance_) {
gen_instance_.reset(new paddle::framework::Generator());
}
gen_instance_->is_init_py = true;
return gen_instance_;
}
// The DefaultCPUGenerator is used in manual_seed()
const std::shared_ptr<Generator>& DefaultCPUGenerator();
private:
static std::shared_ptr<Generator> gen_instance_;
std::shared_ptr<GeneratorState> state_;
mutable std::mutex mutex;
// If the op seed is set, or the global generator is not initialized, the
// OpDefaultCPUEngine is used.
std::shared_ptr<std::mt19937_64> OpDefaultCPUEngine();
Generator(const Generator& other, const std::lock_guard<std::mutex>&)
: state_(std::make_shared<GeneratorState>(*(other.state_))) {}
};
std::shared_ptr<std::mt19937_64> GetCPURandomEngine(uint64_t);
} // namespace framework
} // namespace paddle
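For reference, a minimal usage sketch of the refactored Generator API, assuming it is compiled inside the Paddle source tree; it exercises only the methods declared above and is not part of this commit.

#include <cstdint>
#include <cstdio>
#include "paddle/fluid/framework/generator.h"

int main() {
  paddle::framework::Generator gen(/*seed=*/42);  // seeds the mt19937_64
  auto state = gen.GetState();   // snapshot: seed + engine state, by value
  uint64_t b = gen.Random64();   // thread-safe draw
  gen.SetState(state);           // rewind to the snapshot
  uint64_t c = gen.Random64();   // replays the same stream
  std::printf("replayed: %d\n", b == c);
  return 0;
}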
......@@ -64,11 +64,11 @@ class BernoulliOpKernel<platform::CPUDeviceContext, T>
int64_t size = x->numel();
std::uniform_real_distribution<T> dist(0.0, 1.0);
auto gen_ptr = framework::Generator::GetInstance();
std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine();
auto gen_ptr = framework::DefaultCPUGenerator();
auto engine = gen_ptr->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) {
out_data[i] = BernoulliFunctor(in_data[i], dist(gen_engine));
out_data[i] = BernoulliFunctor(in_data[i], dist(*engine));
}
}
}; // namespace operators
......
......@@ -14,20 +14,19 @@
#pragma once
#include <ThreadPool.h>
#include <gflags/gflags.h>
#include <functional>
#include <future> // NOLINT
#include <memory>
#include <string>
#include <thread> // NOLINT
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <thread> // NOLINT
#include <ThreadPool.h>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/rw_lock.h"
......@@ -89,26 +88,17 @@ class UniformInitializer : public Initializer {
min_ = std::stof(attrs[2]);
max_ = std::stof(attrs[3]);
if (seed_ == 0) {
seed_ = std::random_device()();
}
random_engine_.seed(seed_);
dist_ = std::uniform_real_distribution<float>(min_, max_);
random_engine_ = framework::GetCPURandomEngine(seed_);
}
float GetValue() override {
return framework::Generator::GetInstance()->is_init_py
? dist_(framework::Generator::GetInstance()->GetCPUEngine())
: dist_(random_engine_);
// return dist_(random_engine_);
}
float GetValue() override { return dist_(*random_engine_); }
private:
float min_;
float max_;
std::minstd_rand random_engine_;
std::shared_ptr<std::mt19937_64> random_engine_;
std::uniform_real_distribution<float> dist_;
};
......@@ -139,26 +129,18 @@ class GaussianInitializer : public Initializer {
mean_ = std::stof(attrs[2]);
std_ = std::stof(attrs[3]);
if (seed_ == 0) {
seed_ = std::random_device()();
}
random_engine_ = framework::GetCPURandomEngine(seed_);
random_engine_.seed(seed_);
dist_ = std::normal_distribution<float>(mean_, std_);
}
float GetValue() override {
return framework::Generator::GetInstance()->is_init_py
? dist_(framework::Generator::GetInstance()->GetCPUEngine())
: dist_(random_engine_);
// return dist_(random_engine_);
}
float GetValue() override { return dist_(*random_engine_); }
private:
float std_;
float mean_;
std::minstd_rand random_engine_;
std::shared_ptr<std::mt19937_64> random_engine_;
std::normal_distribution<float> dist_;
};
......
......@@ -55,30 +55,22 @@ class CPUDropoutKernel : public framework::OpKernel<T> {
std::memset(mask_data, 0, size * sizeof(*mask_data)); // NOLINT
return;
}
bool init_generator_py = framework::Generator::GetInstance()->is_init_py;
// std::minstd_rand engine;
// NOTE: a fixed seed should only be used in unittests or for debugging;
// training must be guaranteed to use a random seed.
std::random_device rnd;
std::minstd_rand engine;
int seed_data;
int seed_data = 0;
if (seed) {
seed_data = *(seed->data<int>());
} else {
seed_data =
context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : 0;
}
engine.seed(seed_data);
auto engine = framework::GetCPURandomEngine(seed_data);
std::uniform_real_distribution<float> dist(0, 1);
for (size_t i = 0; i < size; ++i) {
float cur_random =
init_generator_py
? dist(framework::Generator::GetInstance()->GetCPUEngine())
: dist(engine);
if (cur_random < dropout_prob) {
if (dist(*engine) < dropout_prob) {
mask_data[i] = 0;
y_data[i] = 0;
} else {
......
......@@ -39,26 +39,14 @@ class CPUGaussianRandomKernel : public framework::OpKernel<T> {
tensor->Resize(shape);
int64_t size = tensor->numel();
T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
if (framework::Generator::GetInstance()->is_init_py) {
std::mt19937_64& gen_engine =
framework::Generator::GetInstance()->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(gen_engine);
}
} else {
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine);
}
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(*engine);
}
}
};
}; // namespace operators
template <typename T>
class CPUGaussianRandomBatchSizeLikeKernel : public framework::OpKernel<T> {
......
......@@ -13,11 +13,14 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/sampler.h"
#include <glog/logging.h>
#include <iostream>
#include <queue>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/generator.h"
namespace paddle {
......@@ -28,22 +31,17 @@ Sampler::~Sampler() {}
UniformSampler::UniformSampler(int64_t range, unsigned int seed)
: Sampler(range, seed), inv_range_(1.0 / (range + 1)) {
random_engine_ = std::make_shared<std::mt19937_64>(seed_);
random_engine_ = framework::GetCPURandomEngine(seed_);
dist_ = std::make_shared<std::uniform_int_distribution<>>(0, range);
}
int64_t UniformSampler::Sample() const {
return framework::Generator::GetInstance()->is_init_py
? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine())
: (*dist_)(*random_engine_);
// return (*dist_)(*random_engine_);
}
int64_t UniformSampler::Sample() const { return (*dist_)(*random_engine_); }
float UniformSampler::Probability(int64_t value) const { return inv_range_; }
LogUniformSampler::LogUniformSampler(int64_t range, unsigned int seed)
: Sampler(range, seed), log_range_(log(range + 1)) {
random_engine_ = std::make_shared<std::mt19937_64>(seed_);
random_engine_ = framework::GetCPURandomEngine(seed_);
dist_ = std::make_shared<std::uniform_real_distribution<>>(0, 1);
}
......@@ -52,10 +50,7 @@ int64_t LogUniformSampler::Sample() const {
// inverse_transform_sampling method
// More details:
// https://wanghaoshuang.github.io/2017/11/Log-uniform-distribution-sampler/
auto cur_random =
framework::Generator::GetInstance()->is_init_py
? (*dist_)(framework::Generator::GetInstance()->GetCPUEngine())
: (*dist_)(*random_engine_);
auto cur_random = (*dist_)(*random_engine_);
const int64_t value = static_cast<int64_t>(exp(cur_random * log_range_)) - 1;
// Mathematically, value should be <= range_, but might not be due to some
// floating point roundoff, so we mod by range_.
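For readers unfamiliar with the trick: inverse-transform sampling pushes a uniform draw u in [0, 1) through the inverse CDF of the log-uniform distribution on [1, range + 1), so exp(u * log(range + 1)) - 1 lands in [0, range) with P(v) approximately proportional to 1 / (v + 1). A self-contained sketch of the same recipe (standard library only, values chosen for illustration; not part of this commit):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <random>

int main() {
  const int64_t range = 1000;
  const double log_range = std::log(range + 1);  // same role as log_range_

  std::mt19937_64 engine(42);
  std::uniform_real_distribution<> dist(0, 1);

  // Small values come out far more often, since P(v) ~ 1 / (v + 1).
  for (int i = 0; i < 5; ++i) {
    double u = dist(engine);
    int64_t value =
        (static_cast<int64_t>(std::exp(u * log_range)) - 1) % range;
    std::printf("%lld\n", static_cast<long long>(value));
  }
  return 0;
}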
......@@ -74,7 +69,7 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities,
const int *alias, const float *alias_probabilities,
unsigned int seed)
: Sampler(range, seed) {
random_engine_ = std::make_shared<std::mt19937>(seed_);
random_engine_ = framework::GetCPURandomEngine(seed_);
real_dist_ = std::make_shared<std::uniform_real_distribution<>>(0, 1);
int_dist_ = std::make_shared<std::uniform_int_distribution<>>(0, range);
......@@ -84,14 +79,8 @@ CustomSampler::CustomSampler(int64_t range, const float *probabilities,
}
int64_t CustomSampler::Sample() const {
auto index =
framework::Generator::GetInstance()->is_init_py
? (*int_dist_)(framework::Generator::GetInstance()->GetCPUEngine())
: (*int_dist_)(*random_engine_);
auto p =
framework::Generator::GetInstance()->is_init_py
? (*real_dist_)(framework::Generator::GetInstance()->GetCPUEngine())
: (*real_dist_)(*random_engine_);
auto index = (*int_dist_)(*random_engine_);
auto p = (*real_dist_)(*random_engine_);
if (p > alias_probs_[index]) {
int alias = alias_[index];
......
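The Sample() above is the O(1) lookup step of Walker's alias method: draw a slot uniformly, then keep it or jump to its precomputed alias depending on alias_probs_[index]. The following standalone sketch also shows the table construction, which Paddle receives precomputed through the constructor; all names here are illustrative and the code is not part of this commit.

#include <cstdio>
#include <queue>
#include <random>
#include <vector>

// Build alias tables for the given probabilities (Walker's alias method).
void BuildAlias(const std::vector<double>& probs, std::vector<double>* accept,
                std::vector<int>* alias) {
  const int n = static_cast<int>(probs.size());
  accept->assign(n, 0.0);
  alias->assign(n, -1);
  std::queue<int> small, large;
  std::vector<double> scaled(n);
  for (int i = 0; i < n; ++i) {
    scaled[i] = probs[i] * n;
    (scaled[i] < 1.0 ? small : large).push(i);
  }
  while (!small.empty() && !large.empty()) {
    int s = small.front(); small.pop();
    int l = large.front(); large.pop();
    (*accept)[s] = scaled[s];  // keep s with this probability...
    (*alias)[s] = l;           // ...otherwise fall through to l
    scaled[l] -= 1.0 - scaled[s];
    (scaled[l] < 1.0 ? small : large).push(l);
  }
  while (!large.empty()) { (*accept)[large.front()] = 1.0; large.pop(); }
  while (!small.empty()) { (*accept)[small.front()] = 1.0; small.pop(); }
}

int main() {
  std::vector<double> probs = {0.5, 0.3, 0.1, 0.1};
  std::vector<double> accept;
  std::vector<int> alias;
  BuildAlias(probs, &accept, &alias);

  std::mt19937_64 engine(42);
  std::uniform_real_distribution<> real(0, 1);
  std::uniform_int_distribution<> idx(0, static_cast<int>(probs.size()) - 1);

  // The O(1) sampling step, mirroring CustomSampler::Sample():
  // pick a slot, then keep it or jump to its alias.
  int index = idx(engine);
  int sample = (real(engine) <= accept[index]) ? index : alias[index];
  std::printf("sampled %d\n", sample);
  return 0;
}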
......@@ -26,8 +26,8 @@ namespace math {
// TODO(wanghaoshuang): Support for GPU
/**
 * Sample integers from [0, range).
 */
class Sampler {
public:
explicit Sampler(int64_t range, unsigned int seed = 0UL) : range_(range) {
......@@ -117,7 +117,7 @@ class CustomSampler : public Sampler {
const int* alias_;
const float* probs_;
const int exceptional_val = -1;
std::shared_ptr<std::mt19937> random_engine_;
std::shared_ptr<std::mt19937_64> random_engine_;
std::shared_ptr<std::uniform_real_distribution<>> real_dist_;
std::shared_ptr<std::uniform_int_distribution<>> int_dist_;
};
......
......@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/operators/fill_constant_op.h"
#include "paddle/fluid/operators/mean_op.h"
......@@ -35,23 +36,11 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
T* data = tensor->mutable_data<T>(context.GetPlace());
int64_t size = tensor->numel();
std::normal_distribution<T> dist(mean, std);
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
if (framework::Generator::GetInstance()->is_init_py) {
std::mt19937_64& gen_engine =
framework::Generator::GetInstance()->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(gen_engine);
}
} else {
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine);
}
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(*engine);
}
tensor->set_layout(DataLayout::kMKLDNN);
......
......@@ -46,22 +46,11 @@ class CPURandintKernel : public framework::OpKernel<T> {
std::uniform_int_distribution<T> dist(ctx.Attr<int>("low"),
ctx.Attr<int>("high") - 1);
unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
if (framework::Generator::GetInstance()->is_init_py) {
std::mt19937_64& gen_engine =
framework::Generator::GetInstance()->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) data[i] = dist(gen_engine);
} else {
unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine);
}
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(*engine);
}
}
};
......
......@@ -19,6 +19,7 @@ limitations under the License. */
#include <ctime>
#include <string>
#include <vector>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/tensor_util.h"
......@@ -29,20 +30,12 @@ namespace operators {
template <typename T>
static inline void random_permate(T* data_ptr, int num, unsigned int seed) {
auto engine = framework::GetCPURandomEngine(seed);
for (int i = 0; i < num; ++i) {
data_ptr[i] = static_cast<T>(i);
}
if (framework::Generator::GetInstance()->is_init_py) {
std::shuffle(data_ptr, data_ptr + num,
framework::Generator::GetInstance()->GetCPUEngine());
} else {
if (seed == 0) {
seed = std::random_device()();
}
std::srand(seed);
std::random_shuffle(data_ptr, data_ptr + num);
}
std::shuffle(data_ptr, data_ptr + num, *engine);
}
template <typename DeviceContext, typename T>
......
......@@ -51,20 +51,15 @@ class SamplingIdKernel : public framework::OpKernel<T> {
framework::TensorToVector(*input, context.device_context(), &ins_vector);
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
std::uniform_real_distribution<T> dist(
static_cast<T>(context.Attr<float>("min")),
static_cast<T>(context.Attr<float>("max")));
auto engine = framework::GetCPURandomEngine(seed);
std::vector<int64_t> ids(batch_size);
for (int i = 0; i < batch_size; ++i) {
T r = framework::Generator::GetInstance()->is_init_py
? dist(framework::Generator::GetInstance()->GetCPUEngine())
: dist(engine);
T r = dist(*engine);
int idx = width - 1;
for (int j = 0; j < width; ++j) {
if ((r -= ins_vector[i * width + j]) < 0) {
......
......@@ -14,6 +14,7 @@ limitations under the License. */
#include <limits>
#include <random>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
......@@ -167,22 +168,10 @@ class CPUTruncatedGaussianRandomKernel : public framework::OpKernel<T> {
TruncatedNormal<T> truncated_normal(mean, std);
int64_t size = tensor->numel();
if (framework::Generator::GetInstance()->is_init_py) {
std::mt19937_64& gen_engine =
framework::Generator::GetInstance()->GetCPUEngine();
for (int64_t i = 0; i < size; ++i) {
data[i] = truncated_normal(dist(gen_engine));
}
} else {
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = truncated_normal(dist(engine));
}
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = truncated_normal(dist(*engine));
}
}
};
......
......@@ -12,7 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/uniform_random_op.h"
#include <string>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
......@@ -62,34 +64,12 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
std::uniform_real_distribution<T> dist(
static_cast<T>(ctx.Attr<float>("min")),
static_cast<T>(ctx.Attr<float>("max")));
auto gen_ptr = framework::Generator::GetInstance();
if (gen_ptr->is_init_py) {
std::mt19937_64 &gen_engine = gen_ptr->GetCPUEngine();
// auto gen_engine = gen_ptr_->GetCPUEngine();
// std::uniform_real_distribution<T> dist(
// static_cast<T>(ctx.Attr<float>("min")),
// static_cast<T>(ctx.Attr<float>("max")));
unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
auto engine = framework::GetCPURandomEngine(seed);
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(gen_engine);
}
} else {
unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
std::minstd_rand engine;
if (seed == 0) {
seed = std::random_device()();
}
engine.seed(seed);
// std::uniform_real_distribution<T> dist(
// static_cast<T>(ctx.Attr<float>("min")),
// static_cast<T>(ctx.Attr<float>("max")));
// int64_t size = tensor->numel();
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(engine);
}
for (int64_t i = 0; i < size; ++i) {
data[i] = dist(*engine);
}
// std::mt19937_64 &engine = gen_ptr->GetCPUEngine();
// auto engine = gen_ptr_->GetCPUEngine();
unsigned int diag_num =
static_cast<unsigned int>(ctx.Attr<int>("diag_num"));
......@@ -139,12 +119,12 @@ class UniformRandomOp : public framework::OperatorWithKernel {
if (ctx->HasInputs("ShapeTensorList")) {
// top prority shape
auto inputs_name = ctx->Inputs("ShapeTensorList");
PADDLE_ENFORCE_GT(
inputs_name.size(), 0,
platform::errors::InvalidArgument(
"Input(ShapeTensorList)'size of Op(uniform_random) can't be zero."
"Please check the Attr(shape)'s size of"
"Op(fluid.layers.uniform_random).)"));
PADDLE_ENFORCE_GT(inputs_name.size(), 0,
platform::errors::InvalidArgument(
"Input(ShapeTensorList)'size of "
"Op(uniform_random) can't be zero."
"Please check the Attr(shape)'s size of"
"Op(fluid.layers.uniform_random).)"));
auto out_dims = std::vector<int>(inputs_name.size(), -1);
ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
......
......@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/random.h>
#include <thrust/transform.h>
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
......@@ -88,15 +89,12 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
}
T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
if (framework::Generator::GetInstance()->is_init_py) {
seed = static_cast<unsigned int>(
framework::Generator::GetInstance()->GetCurrentSeed());
} else {
if (seed == 0) {
std::random_device rd;
seed = rd();
}
if (seed == 0) {
std::random_device rd;
seed = rd();
}
T min = static_cast<T>(context.Attr<float>("min"));
T max = static_cast<T>(context.Attr<float>("max"));
unsigned int diag_num =
......
......@@ -29,23 +29,36 @@ namespace py = pybind11;
namespace paddle {
namespace pybind {
void BindGenerator(py::module* m) {
py::class_<framework::GeneratorState>(*m, "GeneratorState", "");
py::class_<std::mt19937_64>(*m, "mt19937_64", "");
void BindGenerator(py::module* m_ptr) {
auto& m = *m_ptr;
py::class_<framework::GeneratorState,
std::shared_ptr<framework::GeneratorState>>(m, "GeneratorState")
.def("current_seed",
[](std::shared_ptr<framework::GeneratorState>& self) {
return self->current_seed;
});
py::class_<std::mt19937_64>(m, "mt19937_64", "");
py::class_<framework::Generator, std::shared_ptr<framework::Generator>>(
*m, "Generator")
.def(py::init([]() { return framework::Generator::GetInstanceX(); }),
py::return_value_policy::reference)
.def("get_state", &framework::Generator::GetState,
py::return_value_policy::move)
m, "Generator")
.def("__init__",
[](framework::Generator& self) {
new (&self) framework::Generator();
})
.def("get_state", &framework::Generator::GetState)
.def("set_state", &framework::Generator::SetState)
.def("manual_seed", &framework::Generator::SetCurrentSeed)
.def("manual_seed",
[](std::shared_ptr<framework::Generator>& self, uint64_t seed) {
self->SetCurrentSeed(seed);
return self;
})
.def("seed", &framework::Generator::Seed)
.def("initial_seed", &framework::Generator::GetCurrentSeed)
.def("random", &framework::Generator::Random64)
.def("get_cpu_engine", &framework::Generator::GetCPUEngine,
py::return_value_policy::move)
.def("set_cpu_engine", &framework::Generator::SetCPUEngine);
// .def("get_cpu_engine", &framework::Generator::GetCPUEngine)
// .def("set_cpu_engine", &framework::Generator::SetCPUEngine)
.def_property("_is_init_py", &framework::Generator::GetIsInitPy,
&framework::Generator::SetIsInitPy);
m.def("default_cpu_generator", &framework::DefaultCPUGenerator);
} // end Generator
} // end namespace pybind
} // end namespace paddle
} // namespace paddle
......@@ -42,9 +42,11 @@ class TestSentimentMethods(unittest.TestCase):
def test_data_set(self):
data_set = st.load_sentiment_data()
last_label = -1
for each in st.test():
self.assertNotEqual(each[1], last_label)
last_label = each[1]
self.assertEqual(len(data_set), st.NUM_TOTAL_INSTANCES)
self.assertEqual(len(list(st.train())), st.NUM_TRAINING_INSTANCES)
self.assertEqual(
......
......@@ -92,9 +92,11 @@ class TestWeightDecay(unittest.TestCase):
return param_sum
def check_weight_decay(self, place, model):
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
startup_prog.random_seed = 1
with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)
......@@ -113,9 +115,11 @@ class TestWeightDecay(unittest.TestCase):
return param_sum
def check_weight_decay2(self, place, model):
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
startup_prog.random_seed = 1
with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)
......
......@@ -17,44 +17,28 @@ from . import core
__all__ = ['Generator']
default_rng_seed_val = 34342423252
class Generator(object):
class Generator(core.Generator):
"""Generator class"""
def __init__(self, device="CPU"):
"""init"""
self.device = device
seed_in = default_rng_seed_val
if self.device == "CPU":
self.generator = core.Generator()
# self.generator.manual_seed(seed_in)
else:
raise ValueError(
"generator class with device %s does not exist, currently only support generator with device 'CPU' "
% device)
def get_state(self):
return self.generator.get_state()
def set_state(self, state):
self.generator.set_state(state)
def __init__(self, place=None):
"""
Create a generator object which manages the random number generation. (Experimental Feature)
def manual_seed(self, seed):
self.generator.manual_seed(seed)
Parameters:
place(CPUPlace|CUDAPinnedPlace|CUDAPlace, optional): The place to allocate Tensor. Can be
CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place.
def seed(self):
return self.generator.seed()
Returns:
Generator: A generator object.
def initial_seed(self):
return self.generator.initial_seed()
def random(self):
return self.generator.random()
def get_cpu_engine(self):
return self.generator.get_cpu_engine()
def set_cpu_engine(self, cpu_engine):
self.generator.set_cpu_engine(cpu_engine)
"""
self.place = place
if not place:
place = core.CPUPlace()
if isinstance(place, core.CPUPlace):
super(Generator, self).__init__()
else:
raise ValueError(
    "Generator class with %s is not supported yet; currently only generators with CPUPlace are supported"
    % place)
......@@ -15,6 +15,7 @@
import math
import numpy as np
import unittest
import paddle
from paddle.jit import to_static
import paddle.fluid as fluid
from paddle.fluid import ParamAttr
......@@ -560,8 +561,8 @@ def train_bmn(args, place, to_static):
loss_data = []
with fluid.dygraph.guard(place):
fluid.default_main_program().random_seed = SEED
fluid.default_startup_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
global local_random
local_random = np.random.RandomState(SEED)
......
......@@ -21,6 +21,7 @@ import unittest
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable
from paddle.fluid.dygraph import Embedding, Linear, GRUUnit
......@@ -448,8 +449,8 @@ def do_train(args, to_static):
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
reader = get_random_input_data(args.batch_size, args.vocab_size,
args.num_labels)
......
......@@ -14,6 +14,7 @@
import time
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
......@@ -447,8 +448,8 @@ def train_mobilenet(args, to_static):
with fluid.dygraph.guard(args.place):
np.random.seed(SEED)
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
if args.model == "MobileNetV1":
net = MobileNetV1(class_dim=args.class_dim, scale=1.0)
......
......@@ -19,7 +19,7 @@ import time
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.base import to_variable
......@@ -218,8 +218,8 @@ def train(place):
batch_num = 200
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
......
......@@ -16,6 +16,7 @@ import gym
import math
import itertools
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph.nn as nn
from paddle.fluid.dygraph import to_variable, Layer
......@@ -64,8 +65,8 @@ def train(args, place, to_static):
env.seed(SEED)
with fluid.dygraph.guard(place):
fluid.default_main_program().random_seed = SEED
fluid.default_startup_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
local_random = np.random.RandomState(SEED)
policy = Policy()
......
......@@ -215,8 +215,8 @@ def train(to_static):
"""
with fluid.dygraph.guard(place):
np.random.seed(SEED)
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_reader = paddle.batch(
reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
......
......@@ -331,8 +331,8 @@ def train(train_reader, to_static):
np.random.seed(SEED)
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
se_resnext = SeResNeXt()
optimizer = optimizer_setting(train_parameters, se_resnext.parameters())
......
......@@ -15,6 +15,7 @@ import time
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D, Linear, Embedding
from paddle.fluid.dygraph import to_variable, ProgramTranslator, declarative
......@@ -285,8 +286,8 @@ def train(args, to_static):
with fluid.dygraph.guard(place):
np.random.seed(SEED)
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_reader = fake_data_reader(args.class_num, args.vocab_size,
args.batch_size, args.padding_size)
......
......@@ -108,8 +108,8 @@ def train(conf_dict, to_static):
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
conf_dict['dict_size'] = len(vocab)
conf_dict['seq_len'] = args.seq_len
......
......@@ -18,6 +18,7 @@ import time
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import transformer_util as util
......@@ -31,10 +32,11 @@ STEP_NUM = 10
def train_static(args, batch_generator):
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
train_prog = fluid.Program()
startup_prog = fluid.Program()
train_prog.random_seed = SEED
startup_prog.random_seed = SEED
with fluid.program_guard(train_prog, startup_prog):
with fluid.unique_name.guard():
# define input and reader
......@@ -128,8 +130,8 @@ def train_static(args, batch_generator):
def train_dygraph(args, batch_generator):
with fluid.dygraph.guard(place):
if SEED is not None:
fluid.default_main_program().random_seed = SEED
fluid.default_startup_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
# define data loader
train_loader = fluid.io.DataLoader.from_generator(capacity=10)
train_loader.set_batch_generator(batch_generator, places=place)
......@@ -220,7 +222,8 @@ def train_dygraph(args, batch_generator):
def predict_dygraph(args, batch_generator):
with fluid.dygraph.guard(place):
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
# define data loader
test_loader = fluid.io.DataLoader.from_generator(capacity=10)
......@@ -291,7 +294,8 @@ def predict_dygraph(args, batch_generator):
def predict_static(args, batch_generator):
test_prog = fluid.Program()
with fluid.program_guard(test_prog):
test_prog.random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
# define input and reader
input_field_names = util.encoder_data_input_fields + util.fast_decoder_data_input_fields
......
......@@ -20,7 +20,7 @@ import random
import sys
import time
import unittest
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import declarative, ProgramTranslator, to_variable
from paddle.fluid.dygraph.nn import Conv2D, BatchNorm, Linear, Pool2D
......@@ -272,8 +272,8 @@ def train(args, fake_data_reader, to_static):
random.seed(0)
np.random.seed(0)
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = 1000
fluid.default_main_program().random_seed = 1000
paddle.manual_seed(1000)
paddle.framework.random._manual_program_seed(1000)
video_model = TSM_ResNet("TSM", train_config, 'Train')
......
......@@ -17,6 +17,7 @@ from __future__ import print_function
import multiprocessing
import os
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import compiler
......@@ -64,10 +65,11 @@ class TestParallelExecutorBase(unittest.TestCase):
feed_data_reader, FeedDataReader
), "feed_data_reader must be type of FeedDataReader"
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
main = fluid.Program()
startup = fluid.Program()
startup.random_seed = 1
main.random_seed = 1
with fluid.program_guard(main, startup):
feed_dict, loss = cls.build_model(feed_dict, get_data_from_feeder,
main, method, optimizer)
......
......@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import Parameter
import numpy as np
......@@ -44,10 +45,10 @@ class InplaceTestBase(unittest.TestCase):
def build_program_and_scope(self):
self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
startup_program = fluid.Program()
main_program = fluid.Program()
startup_program.random_seed = 1
main_program.random_seed = 1
scope = fluid.Scope()
with fluid.program_guard(main_program, startup_program):
......
......@@ -16,6 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from test_imperative_base import new_program_scope
......@@ -29,8 +30,8 @@ class TestCompiledProgram(unittest.TestCase):
self.label = np.random.randint(
low=0, high=10, size=[16, 1], dtype=np.int64)
with new_program_scope():
fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed
paddle.manual_seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
......@@ -46,8 +47,8 @@ class TestCompiledProgram(unittest.TestCase):
def test_compiled_program_base(self):
with new_program_scope():
fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed
paddle.manual_seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
......@@ -64,8 +65,8 @@ class TestCompiledProgram(unittest.TestCase):
def test_compiled_program_with_data_parallel(self):
with new_program_scope():
fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed
paddle.manual_seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
......
......@@ -34,10 +34,10 @@ def random_reader():
def simple_fc_net(places, use_legacy_py_reader, use_double_buffer):
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
startup_prog = fluid.Program()
main_prog = fluid.Program()
startup_prog.random_seed = 1
main_prog.random_seed = 1
with fluid.unique_name.guard():
with fluid.program_guard(main_prog, startup_prog):
......
......@@ -27,6 +27,8 @@ from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope
SEED = 123123111
class SimpleImgConvPool(fluid.dygraph.Layer):
def __init__(self,
......@@ -105,12 +107,11 @@ class MNIST(fluid.dygraph.Layer):
class TestDygraphMultiForward(unittest.TestCase):
def test_mnist_forward_float32(self):
seed = 90
epoch_num = 1
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
with fluid.dygraph.guard():
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
mnist = MNIST()
sgd = SGDOptimizer(
learning_rate=1e-3, parameter_list=mnist.parameters())
......@@ -142,9 +143,8 @@ class TestDygraphMultiForward(unittest.TestCase):
dy_param_init_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
......
......@@ -18,6 +18,7 @@ from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
......@@ -465,9 +466,9 @@ class PaddingRNNTestBase(unittest.TestCase):
pass
def _prepare_program(self, config, parallel=True):
paddle.manual_seed(config.random_seed)
self.main_program = fluid.Program()
self.startup_program = fluid.Program()
self.startup_program.random_seed = config.random_seed
with fluid.program_guard(self.main_program, self.startup_program):
with fluid.unique_name.guard():
res_vars = lm_model(
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import numpy as np
import paddle
import paddle.fluid as fluid
import six
import unittest
......@@ -37,13 +38,13 @@ class TestEmbeddingIdStopGradientBase(unittest.TestCase):
self.assertTrue(np.array_equal(grad_value1, grad_value2))
def run_program(self, place, stop_gradient=False):
np.random.seed(1)
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
startup_program = fluid.Program()
main_program = fluid.Program()
np.random.seed(1)
startup_program.random_seed = 1
main_program.random_seed = 1
scope = fluid.Scope()
with fluid.program_guard(main_program, startup_program):
with fluid.scope_guard(scope):
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import unittest
import paddle
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
......@@ -135,31 +136,32 @@ class TestFCOpWithPadding(TestFCOp):
class TestFcOp_NumFlattenDims_NegOne(unittest.TestCase):
def test_api(self):
startup_program = Program()
main_program = Program()
startup_program.random_seed = SEED
main_program.random_seed = SEED
with program_guard(main_program, startup_program):
input = np.random.random([2, 2, 25]).astype("float32")
x = fluid.layers.data(
name="x",
shape=[2, 2, 25],
append_batch_size=False,
dtype="float32")
out_1 = fluid.layers.fc(input=x, size=1, num_flatten_dims=-1)
out_2 = fluid.layers.fc(input=x, size=1, num_flatten_dims=2)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
exe = fluid.Executor(place=place)
exe.run(startup_program)
res_1, res_2 = exe.run(main_program,
feed={"x": input},
fetch_list=[out_1, out_2])
assert np.array_equal(res_1, res_2)
def run_program(num_flatten_dims):
paddle.manual_seed(SEED)
startup_program = Program()
main_program = Program()
with program_guard(main_program, startup_program):
input = np.random.random([2, 2, 25]).astype("float32")
x = fluid.layers.data(
name="x",
shape=[2, 2, 25],
append_batch_size=False,
dtype="float32")
out = fluid.layers.fc(input=x,
size=1,
num_flatten_dims=num_flatten_dims)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
exe = fluid.Executor(place=place)
exe.run(startup_program)
out = exe.run(main_program, feed={"x": input}, fetch_list=[out])
res_1 = run_program(-1)
res_2 = run_program(2)
self.assertTrue(np.array_equal(res_1, res_2))
class TestFCOpError(unittest.TestCase):
......
......@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from simple_nets import simple_fc_net, fc_with_batchnorm, init_data, bow_net
from fake_reader import fake_imdb_reader
from parallel_executor_test_base import TestParallelExecutorBase
......
......@@ -19,8 +19,6 @@ import unittest
class TestFuseBatchNormActPass(unittest.TestCase):
def build_program(self, main_program, startup_program, use_cuda, seed=1):
main_program.random_seed = seed
startup_program.random_seed = seed
with fluid.program_guard(main_program, startup_program):
x = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32')
y = fluid.layers.data(name="y", shape=[1], dtype='int64')
......@@ -59,6 +57,8 @@ class TestFuseBatchNormActPass(unittest.TestCase):
return x, y, loss
def check(self, place, use_cuda):
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
main_program = fluid.Program()
startup_program = fluid.Program()
x, y, loss = self.build_program(main_program, startup_program, use_cuda)
......
......@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from simple_nets import simple_fc_net, fc_with_batchnorm, init_data, bow_net
from fake_reader import fake_imdb_reader
from parallel_executor_test_base import TestParallelExecutorBase
......
......@@ -16,7 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
......@@ -37,6 +37,7 @@ class TestGaussianRandomOp(OpTest):
"seed": 10,
"use_mkldnn": self.use_mkldnn
}
paddle.manual_seed(10)
self.outputs = {'Out': np.zeros((123, 92), dtype='float32')}
......
......@@ -16,6 +16,7 @@
from __future__ import print_function
import os
import unittest
import paddle
import paddle.fluid.generator as generator
import time # temp for debug
......@@ -34,10 +35,11 @@ class TestGenerator(unittest.TestCase):
st = gen.get_state()
gen.set_state(st)
gen.random()
gen.set_cpu_engine(gen.get_cpu_engine())
def test_basic_generator_error(self):
self.assertRaises(ValueError, generator.Generator, device="CUDA")
if paddle.fluid.core.is_compiled_with_cuda():
self.assertRaises(
ValueError, generator.Generator, place=paddle.CUDAPlace(0))
if __name__ == "__main__":
......
......@@ -35,10 +35,10 @@ def random_reader():
def simple_fc_net(places, use_legacy_py_reader, use_double_buffer):
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
startup_prog = fluid.Program()
main_prog = fluid.Program()
startup_prog.random_seed = 1
main_prog.random_seed = 1
with fluid.unique_name.guard():
with fluid.program_guard(main_prog, startup_prog):
......
......@@ -16,6 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
......@@ -266,8 +267,8 @@ class TestHSigmoidOpWithSparseGrad(unittest.TestCase):
def training_test(self, is_sparse):
with fluid.program_guard(fluid.Program(), fluid.Program()):
paddle.manual_seed(1)
start_up = fluid.default_startup_program()
start_up.random_seed = 1 # Fix random seed
x = np.arange(6).reshape(6)
path_table = np.array([(1, 2, -1), (1, 2, -1)]).astype('int64')
path_code = np.array([(1, 0, -1), (0, 0, -1)]).astype('int64')
......
......@@ -121,6 +121,7 @@ class TestAmpScaler(unittest.TestCase):
def run_simple_conv(inp_np, use_scaler=True):
paddle.manual_seed(10)
paddle.framework.random._manual_program_seed(10)
with fluid.dygraph.guard():
model = SimpleConv(
num_channels=3,
......@@ -204,6 +205,7 @@ class TestResnet(unittest.TestCase):
with fluid.dygraph.guard():
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
resnet = ResNet(use_cudnn=True)
optimizer = optimizer_setting(
......
......@@ -206,11 +206,10 @@ class TestDygraphDeepCF(unittest.TestCase):
else:
(users_np, items_np, labels_np, num_users, num_items,
matrix) = get_data()
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
startup = fluid.Program()
startup.random_seed = seed
main = fluid.Program()
main.random_seed = seed
scope = fluid.core.Scope()
with new_program_scope(main=main, startup=startup, scope=scope):
......@@ -244,8 +243,8 @@ class TestDygraphDeepCF(unittest.TestCase):
sys.stderr.write('static loss %s\n' % static_loss)
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
deepcf = DeepCF(num_users, num_items, matrix)
adam = fluid.optimizer.AdamOptimizer(
......@@ -269,8 +268,8 @@ class TestDygraphDeepCF(unittest.TestCase):
sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss))
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
deepcf2 = DeepCF(num_users, num_items, matrix)
adam2 = fluid.optimizer.AdamOptimizer(
......
......@@ -312,6 +312,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
with fluid.dygraph.guard():
paddle.manual_seed(123)
paddle.framework.random._manual_program_seed(123)
a = fluid.dygraph.to_variable(value)
a.stop_gradient = False
......@@ -328,6 +329,7 @@ class TestDygraphDoubleGradVisitedUniq(TestCase):
with fluid.dygraph.guard():
paddle.manual_seed(123)
paddle.framework.random._manual_program_seed(123)
a = fluid.dygraph.to_variable(value)
a.stop_gradient = False
......
......@@ -56,13 +56,11 @@ class Generator(fluid.Layer):
class TestDygraphGAN(unittest.TestCase):
def test_gan_float32(self):
seed = 90
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
startup = fluid.Program()
startup.random_seed = seed
discriminate_p = fluid.Program()
generate_p = fluid.Program()
discriminate_p.random_seed = seed
generate_p.random_seed = seed
scope = fluid.core.Scope()
with new_program_scope(
......@@ -133,8 +131,8 @@ class TestDygraphGAN(unittest.TestCase):
dy_params = dict()
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
discriminator = Discriminator()
generator = Generator()
......@@ -177,10 +175,9 @@ class TestDygraphGAN(unittest.TestCase):
dy_params2 = dict()
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
discriminator2 = Discriminator()
generator2 = Generator()
sgd2 = SGDOptimizer(
......
......@@ -61,12 +61,10 @@ class GCN(fluid.Layer):
class TestDygraphGNN(unittest.TestCase):
def test_gnn_float32(self):
seed = 90
paddle.manual_seed(90)
paddle.framework.random._manual_program_seed(90)
startup = fluid.Program()
startup.random_seed = seed
main = fluid.Program()
main.random_seed = seed
scope = fluid.core.Scope()
with new_program_scope(main=main, startup=startup, scope=scope):
......@@ -114,8 +112,8 @@ class TestDygraphGNN(unittest.TestCase):
scope.find_var(model.gc.weight.name).get_tensor())
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(90)
paddle.framework.random._manual_program_seed(90)
features = np.ones([1, 100, 50], dtype=np.float32)
# Use selected rows when it's supported.
......@@ -140,8 +138,8 @@ class TestDygraphGNN(unittest.TestCase):
model_gc_weight_value = model.gc.weight.numpy()
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(90)
paddle.framework.random._manual_program_seed(90)
features2 = np.ones([1, 100, 50], dtype=np.float32)
# Use selected rows when it's supported.
......
......@@ -15,6 +15,7 @@
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.nn import Embedding
......@@ -94,8 +95,8 @@ class TestDygraphSimpleNet(unittest.TestCase):
for is_sort_sum_gradient in [True, False]:
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
simple_net = SimpleNet(
hidden_size=hidden_size,
......@@ -139,8 +140,8 @@ class TestDygraphSimpleNet(unittest.TestCase):
dy_loss_value = dy_loss.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
simple_net = SimpleNet(
hidden_size=hidden_size,
......
......@@ -16,6 +16,7 @@ from __future__ import print_function
import unittest
import numpy as np
import six
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear, BatchNorm, Embedding, GRUUnit
......@@ -401,9 +402,9 @@ class TestDygraphOCRAttention(unittest.TestCase):
dtype='int64').reshape([1, Config.max_length])))
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
ocr_attention = OCRAttention()
if Config.learning_rate_decay == "piecewise_decay":
......@@ -453,8 +454,8 @@ class TestDygraphOCRAttention(unittest.TestCase):
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
ocr_attention = OCRAttention()
......
......@@ -74,8 +74,8 @@ class TestImperativeOptimizerBase(unittest.TestCase):
with fluid.dygraph.guard(place):
try:
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
mlp = MLP()
optimizer = self.get_optimizer_dygraph(
parameter_list=mlp.parameters())
......@@ -91,8 +91,8 @@ class TestImperativeOptimizerBase(unittest.TestCase):
) else fluid.CUDAPlace(0)
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
mlp = MLP()
optimizer = self.get_optimizer_dygraph(
......@@ -132,8 +132,8 @@ class TestImperativeOptimizerBase(unittest.TestCase):
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
if place == None:
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
......
......@@ -74,8 +74,8 @@ class TestImperativeOptimizerBase(unittest.TestCase):
with fluid.dygraph.guard(place):
try:
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
mlp = MLP()
optimizer = self.get_optimizer_dygraph(
parameter_list=mlp.parameters())
......@@ -91,8 +91,8 @@ class TestImperativeOptimizerBase(unittest.TestCase):
) else fluid.CUDAPlace(0)
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
mlp = MLP()
optimizer = self.get_optimizer_dygraph(
......@@ -132,8 +132,8 @@ class TestImperativeOptimizerBase(unittest.TestCase):
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
if place == None:
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
......
......@@ -15,6 +15,7 @@
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.nn import Embedding
......@@ -225,8 +226,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
traced_layer = None
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -293,8 +294,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
dy_last_hidden_value = last_hidden.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
......
......@@ -15,6 +15,7 @@
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.nn import Embedding
......@@ -43,9 +44,10 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -93,8 +95,9 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
dy_last_hidden_value = last_hidden.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
......
......@@ -64,8 +64,8 @@ class TestImperativeMnist(unittest.TestCase):
mask = np.array(mask_list).astype("float32")
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
policy = Policy(input_size=4)
......@@ -105,8 +105,8 @@ class TestImperativeMnist(unittest.TestCase):
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
......
......@@ -251,8 +251,8 @@ class TestDygraphResnet(unittest.TestCase):
traced_layer = None
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
resnet = ResNet()
optimizer = optimizer_setting(
......@@ -334,8 +334,8 @@ class TestDygraphResnet(unittest.TestCase):
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
......
......@@ -77,9 +77,10 @@ class TestDygraphResnetSortGradient(unittest.TestCase):
batch_size = train_parameters["batch_size"]
batch_num = 10
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
resnet = ResNet()
optimizer = optimizer_setting(
train_parameters, parameter_list=resnet.parameters())
......@@ -136,8 +137,8 @@ class TestDygraphResnetSortGradient(unittest.TestCase):
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
......
......@@ -219,8 +219,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -305,8 +305,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -415,8 +415,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -522,8 +522,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -635,8 +635,6 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -714,8 +712,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -805,9 +803,10 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
......
......@@ -219,8 +219,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -307,8 +307,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -414,8 +414,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -522,8 +522,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -636,8 +636,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -715,8 +715,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......@@ -806,8 +806,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
......
......@@ -308,8 +308,8 @@ class TestImperativeResneXt(unittest.TestCase):
batch_num = 1
epoch_num = 1
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
se_resnext = SeResNeXt()
optimizer = optimizer_setting(
......@@ -367,8 +367,8 @@ class TestImperativeResneXt(unittest.TestCase):
dy_param_value[param.name] = param.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
exe = fluid.Executor(fluid.CPUPlace(
) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
......
......@@ -15,6 +15,7 @@
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.nn import Embedding
......@@ -101,8 +102,8 @@ class TestDygraphSimpleNet(unittest.TestCase):
for is_sort_sum_gradient in [True, False]:
traced_layer = None
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
simple_net = SimpleNet(
hidden_size=hidden_size,
......@@ -145,8 +146,8 @@ class TestDygraphSimpleNet(unittest.TestCase):
dy_loss_value = dy_loss.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
simple_net = SimpleNet(
hidden_size=hidden_size,
......
......@@ -468,8 +468,8 @@ def build_optimizer(layer, cfg, loss=None):
class DyGraphTrainModel(object):
def __init__(self, cfg):
fluid.default_startup_program().random_seed = cfg.seed
fluid.default_main_program().random_seed = cfg.seed
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
self.generator = Generator(cfg)
self.discriminator = Discriminator(cfg)
......@@ -529,12 +529,12 @@ class StaticGraphTrainModel(object):
shape=[None, cfg.c_dim], dtype='float32', name='label_trg')
return image_real, label_org, label_trg
paddle.manual_seed(cfg.seed)
paddle.framework.random._manual_program_seed(cfg.seed)
self.gen_program = fluid.Program()
gen_startup_program = fluid.Program()
with fluid.program_guard(self.gen_program, gen_startup_program):
self.gen_program.random_seed = cfg.seed
gen_startup_program.random_seed = cfg.seed
with fluid.unique_name.guard():
image_real, label_org, label_trg = create_data_layer()
generator = Generator(cfg)
......@@ -546,8 +546,6 @@ class StaticGraphTrainModel(object):
self.dis_program = fluid.Program()
dis_startup_program = fluid.Program()
with fluid.program_guard(self.dis_program, dis_startup_program):
self.dis_program.random_seed = cfg.seed
dis_startup_program.random_seed = cfg.seed
with fluid.unique_name.guard():
image_real, label_org, label_trg = create_data_layer()
generator = Generator(cfg)
......
......@@ -15,6 +15,7 @@
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Embedding, LayerNorm, Linear, Layer
from paddle.fluid.dygraph import to_variable, guard
......@@ -949,9 +950,9 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
seed = 90
with guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
transformer = TransFormer(
ModelHyperParams.src_vocab_size,
ModelHyperParams.trg_vocab_size,
......@@ -1034,8 +1035,8 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
dy_token_num_value = dy_token_num.numpy()
with new_program_scope():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
transformer = TransFormer(
ModelHyperParams.src_vocab_size,
ModelHyperParams.trg_vocab_size,
......
......@@ -37,10 +37,10 @@ class TestIrMemoryOptimizeIfElseOp(unittest.TestCase):
use_cuda=True,
use_mem_opt=False,
iter_num=5):
paddle.manual_seed(100)
paddle.framework.random._manual_program_seed(100)
prog = Program()
startup_prog = Program()
prog.random_seed = 100
startup_prog.random_seed = 100
with program_guard(prog, startup_prog):
image = layers.data(name='x', shape=[784], dtype='float32')
......
......@@ -18,7 +18,7 @@ import os
import pickle
import unittest
import numpy as np
import paddle
from paddle.static import InputSpec
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
......@@ -80,7 +80,7 @@ class LinearNetReturnLoss(fluid.dygraph.Layer):
def train(layer, input_size=784, label_size=1):
# create optimizer
adam = fluid.optimizer.SGDOptimizer(
sgd = fluid.optimizer.SGDOptimizer(
learning_rate=0.01, parameter_list=layer.parameters())
# create data loader
train_loader = fluid.io.DataLoader.from_generator(capacity=5)
......@@ -97,7 +97,7 @@ def train(layer, input_size=784, label_size=1):
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
adam.minimize(avg_loss)
sgd.minimize(avg_loss)
layer.clear_gradients()
return [img], layer, avg_loss
......@@ -108,7 +108,8 @@ class TestJitSaveLoad(unittest.TestCase):
# enable dygraph mode
fluid.enable_dygraph()
# config seed
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def train_and_save_model(self, model_path=None, configs=None):
layer = LinearNet(784, 1)
......@@ -149,8 +150,8 @@ class TestJitSaveLoad(unittest.TestCase):
train_layer.train()
load_train_layer.train()
# train & compare
_, _, train_loss = train(train_layer)
_, _, load_train_loss = train(load_train_layer)
img0, _, train_loss = train(train_layer)
img1, _, load_train_loss = train(load_train_layer)
self.assertTrue(
np.array_equal(train_loss.numpy(), load_train_loss.numpy()))
......@@ -293,7 +294,8 @@ class TestJitSaveLoadConfig(unittest.TestCase):
# enable dygraph mode
fluid.enable_dygraph()
# config seed
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def basic_save_load(self, layer, model_path, configs):
# 1. train & save
......@@ -385,7 +387,8 @@ class TestJitMultipleLoading(unittest.TestCase):
# enable dygraph mode
fluid.enable_dygraph()
# config seed
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
# train and save base model
self.train_and_save_orig_model()
......@@ -426,7 +429,8 @@ class TestJitPruneModelAndLoad(unittest.TestCase):
# enable dygraph mode
fluid.enable_dygraph()
# config seed
fluid.default_main_program().random_seed = SEED
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def train_and_save(self):
train_layer = LinearNetReturnHidden(8, 8)
......
......@@ -57,8 +57,8 @@ class LayerTest(unittest.TestCase):
@contextlib.contextmanager
def static_graph(self):
with new_program_scope():
fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed
paddle.manual_seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed)
yield
def get_static_graph_result(self,
......@@ -77,8 +77,8 @@ class LayerTest(unittest.TestCase):
def dynamic_graph(self, force_to_use_cpu=False):
with fluid.dygraph.guard(
self._get_place(force_to_use_cpu=force_to_use_cpu)):
fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed
paddle.manual_seed(self.seed)
paddle.framework.random._manual_program_seed(self.seed)
yield
......@@ -1034,7 +1034,7 @@ class TestLayer(LayerTest):
static_rlt2 = self.get_static_graph_result(
feed=feed_dict, fetch_list=[nce_loss2])[0]
with self.dynamic_graph(force_to_use_cpu=True):
with self.dynamic_graph():
words = []
for i in range(window_size):
words.append(base.to_variable(inp_word[i]))
......@@ -1070,7 +1070,7 @@ class TestLayer(LayerTest):
self.assertTrue(np.allclose(static_rlt2, static_rlt))
self.assertTrue(np.allclose(dy_rlt_value, static_rlt))
with self.dynamic_graph(force_to_use_cpu=True):
with self.dynamic_graph():
custom_weight = np.random.randn(dict_size, 128).astype("float32")
weight_attr = fluid.ParamAttr(
initializer=fluid.initializer.NumpyArrayInitializer(
......@@ -1996,13 +1996,13 @@ class TestLayer(LayerTest):
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.random.rand(3, 32, 32).astype("float32")
y = np.array([[1], [0], [1]])
# x = np.random.rand(3, 32, 32).astype("float32")
# y = np.array([[1], [0], [1]])
static_out = exe.run(feed={"input": x,
"label": y},
fetch_list=result[0])
with self.dynamic_graph():
with self.dynamic_graph(force_to_use_cpu=True):
data = base.to_variable(x)
label = base.to_variable(y)
fc_out = fluid.layers.fc(data, size=10)
......
......@@ -15,30 +15,33 @@
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
from paddle.framework import manual_seed
from paddle.fluid.framework import Program, default_main_program, default_startup_program
import numpy as np
class TestManualSeed(unittest.TestCase):
def test_manual_seed(self):
local_program = Program()
local_main_prog = default_main_program()
local_start_prog = default_startup_program()
self.assertEqual(0, local_program.random_seed)
self.assertEqual(0, local_main_prog.random_seed)
self.assertEqual(0, local_start_prog.random_seed)
manual_seed(102)
global_program1 = Program()
global_program2 = Program()
global_main_prog = default_main_program()
global_start_prog = default_startup_program()
self.assertEqual(102, global_program1.random_seed)
self.assertEqual(102, global_program2.random_seed)
self.assertEqual(102, global_main_prog.random_seed)
self.assertEqual(102, global_start_prog.random_seed)
fluid.enable_dygraph()
gen = paddle.manual_seed(12312321111)
x = fluid.layers.gaussian_random([10], dtype="float32")
st1 = gen.get_state()
x1 = fluid.layers.gaussian_random([10], dtype="float32")
gen.set_state(st1)
x2 = fluid.layers.gaussian_random([10], dtype="float32")
gen.manual_seed(12312321111)
x3 = fluid.layers.gaussian_random([10], dtype="float32")
x_np = x.numpy()
x1_np = x1.numpy()
x2_np = x2.numpy()
x3_np = x3.numpy()
if not fluid.core.is_compiled_with_cuda():
self.assertTrue(np.allclose(x1_np, x2_np))
self.assertTrue(np.allclose(x_np, x3_np))
if __name__ == '__main__':
......
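The dygraph test above checks generator-state round-tripping: draw, snapshot with get_state, draw again, rewind with set_state, and draw once more; the draws after the snapshot and after the rewind must match on a CPU build (CUDA still uses a separate seeding path). A condensed sketch of that check:

import numpy as np
import paddle
import paddle.fluid as fluid

fluid.enable_dygraph()
gen = paddle.manual_seed(12312321111)

x = fluid.layers.gaussian_random([10], dtype="float32")
st = gen.get_state()                 # snapshot after the first draw
x1 = fluid.layers.gaussian_random([10], dtype="float32")
gen.set_state(st)                    # rewind to the snapshot
x2 = fluid.layers.gaussian_random([10], dtype="float32")

# x1 and x2 are drawn from identical engine states, so they match on CPU.
if not fluid.core.is_compiled_with_cuda():
    assert np.allclose(x1.numpy(), x2.numpy())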
......@@ -147,10 +147,8 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor):
with fluid.program_guard(fluid.Program(), fluid.Program()):
with fluid.scope_guard(fluid.core.Scope()):
fluid.default_main_program().random_seed = 1
fluid.default_startup_program().random_seed = 1
gen = paddle.manual_seed(1)
np.random.seed(1)
img = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
loss = simple_fc_net(img, label, use_py_func_op)
......@@ -189,17 +187,17 @@ class TestPyFuncOpUseExecutor(unittest.TestCase):
self.use_parallel_executor = False
def test_loss_diff(self):
losses = []
for use_cuda in [True, False]:
losses = []
for use_py_func_op in [True, False]:
L = test_main(use_cuda, use_py_func_op,
self.use_parallel_executor)
if L is not None:
losses.append(L)
for idx in six.moves.range(len(losses) - 1):
max_diff = np.max(np.abs(losses[idx] - losses[0]))
self.assertAlmostEqual(max_diff, 0, delta=1e-3)
for idx in six.moves.range(len(losses) - 1):
max_diff = np.max(np.abs(losses[idx] - losses[0]))
self.assertAlmostEqual(max_diff, 0, delta=1e-3)
class TestPyFuncOpUseParallelExecutor(TestPyFuncOpUseExecutor):
......
......@@ -26,27 +26,31 @@ import paddle.fluid.core as core
class TestGeneratorSeed(unittest.TestCase):
"""
Test cases for cpu generator seed.
"""
# """
# Test cases for cpu generator seed.
# """
def test_generator_uniform_random_dygraph(self):
"""Test Generator seed."""
gen = generator.Generator()
fluid.enable_dygraph()
gen.manual_seed(12312321111)
gen = paddle.manual_seed(12312321111)
x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0)
st1 = gen.get_state()
x1 = fluid.layers.uniform_random(
[10], dtype="float32", min=0.0, max=1.0)
gen.set_state(st1)
print(gen.get_state())
x2 = fluid.layers.uniform_random(
[10], dtype="float32", min=0.0, max=1.0)
gen.manual_seed(12312321111)
paddle.manual_seed(12312321111)
x3 = fluid.layers.uniform_random(
[10], dtype="float32", min=0.0, max=1.0)
x_np = x.numpy()
x1_np = x1.numpy()
x2_np = x2.numpy()
......@@ -57,11 +61,9 @@ class TestGeneratorSeed(unittest.TestCase):
self.assertTrue(np.allclose(x_np, x3_np))
def test_generator_uniform_random_static(self):
fluid.disable_dygraph()
gen = generator.Generator()
gen.manual_seed(123123143)
gen = paddle.manual_seed(123123143)
startup_program = fluid.Program()
train_program = fluid.Program()
......@@ -93,11 +95,9 @@ class TestGeneratorSeed(unittest.TestCase):
self.assertTrue(not np.allclose(out1_res2, out1_res1))
def test_gen_dropout_dygraph(self):
gen = generator.Generator()
fluid.enable_dygraph()
gen.manual_seed(111111111)
gen = paddle.manual_seed(111111111)
st = gen.get_state()
# x = np.arange(1,101).reshape(2,50).astype("float32")
x = fluid.layers.uniform_random(
......@@ -110,8 +110,7 @@ class TestGeneratorSeed(unittest.TestCase):
y1 = fluid.layers.dropout(x1, 0.5)
y_np = y.numpy()
y1_np = y1.numpy()
#print(y_np)
#print(y1_np)
if not core.is_compiled_with_cuda():
print(">>>>>>> dropout dygraph >>>>>>>")
self.assertTrue(np.allclose(y_np, y1_np))
......@@ -119,8 +118,7 @@ class TestGeneratorSeed(unittest.TestCase):
def test_gen_dropout_static(self):
fluid.disable_dygraph()
gen = generator.Generator()
gen.manual_seed(123123143)
gen = paddle.manual_seed(123123143)
startup_program = fluid.Program()
train_program = fluid.Program()
......@@ -137,19 +135,16 @@ class TestGeneratorSeed(unittest.TestCase):
out2 = exe.run(train_program, feed={}, fetch_list=[y_1])
out1_np = np.array(out1[0])
out2_np = np.array(out2[0])
# print(out1_np)
# print(out2_np)
if not core.is_compiled_with_cuda():
print(">>>>>>> dropout static >>>>>>>")
self.assertTrue(np.allclose(out1_np, out2_np))
def test_generator_gaussian_random_dygraph(self):
"""Test Generator seed."""
gen = generator.Generator()
fluid.enable_dygraph()
gen.manual_seed(12312321111)
gen = paddle.manual_seed(12312321111)
x = fluid.layers.gaussian_random([10], dtype="float32")
st1 = gen.get_state()
x1 = fluid.layers.gaussian_random([10], dtype="float32")
......@@ -168,11 +163,9 @@ class TestGeneratorSeed(unittest.TestCase):
self.assertTrue(np.allclose(x_np, x3_np))
def test_generator_gaussian_random_static(self):
fluid.disable_dygraph()
gen = generator.Generator()
gen.manual_seed(123123143)
gen = paddle.manual_seed(123123143)
startup_program = fluid.Program()
train_program = fluid.Program()
......@@ -210,7 +203,7 @@ class TestGeneratorSeed(unittest.TestCase):
fluid.enable_dygraph()
gen.manual_seed(12312321111)
gen = paddle.manual_seed(12312321111)
x = paddle.randint(low=10, shape=[10], dtype="int32")
st1 = gen.get_state()
x1 = paddle.randint(low=10, shape=[10], dtype="int32")
......@@ -228,12 +221,64 @@ class TestGeneratorSeed(unittest.TestCase):
self.assertTrue(np.allclose(x1_np, x2_np))
self.assertTrue(np.allclose(x_np, x3_np))
def test_generator_ranint_static(self):
def test_generator_uniform_random_static(self):
fluid.disable_dygraph()
gen = paddle.manual_seed(123123143)
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
result_1 = fluid.layers.uniform_random(shape=[3, 4])
result_2 = fluid.layers.uniform_random(shape=[3, 4])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
out1 = exe.run(train_program,
feed={},
fetch_list=[result_1, result_2])
#gen.set_state(cur_state)
gen.manual_seed(123123143)
out2 = exe.run(train_program,
feed={},
fetch_list=[result_1, result_2])
out1_res1 = np.array(out1[0])
out1_res2 = np.array(out1[1])
out2_res1 = np.array(out2[0])
out2_res2 = np.array(out2[1])
if not core.is_compiled_with_cuda():
self.assertTrue(np.allclose(out1_res1, out2_res1))
self.assertTrue(np.allclose(out1_res2, out2_res2))
self.assertTrue(not np.allclose(out1_res2, out1_res1))
def test_generator_randint_dygraph(self):
"""Test Generator seed."""
fluid.enable_dygraph()
gen = paddle.manual_seed(12312321111)
x = paddle.randint(low=1)
st1 = gen.get_state()
x1 = paddle.randint(low=1)
gen.set_state(st1)
x2 = paddle.randint(low=1)
gen.manual_seed(12312321111)
x3 = paddle.randint(low=1)
x_np = x.numpy()
x1_np = x1.numpy()
x2_np = x2.numpy()
x3_np = x3.numpy()
if not core.is_compiled_with_cuda():
self.assertTrue(np.allclose(x1_np, x2_np))
self.assertTrue(np.allclose(x_np, x3_np))
def test_generator_ranint_static(self):
fluid.disable_dygraph()
gen = generator.Generator()
gen.manual_seed(123123143)
gen = paddle.manual_seed(123123143)
startup_program = fluid.Program()
train_program = fluid.Program()
......@@ -267,11 +312,10 @@ class TestGeneratorSeed(unittest.TestCase):
def test_generator_randperm_dygraph(self):
"""Test Generator seed."""
gen = generator.Generator()
fluid.enable_dygraph()
gen.manual_seed(12312321111)
gen = paddle.manual_seed(12312321111)
x = paddle.randperm(10)
st1 = gen.get_state()
x1 = paddle.randperm(10)
......@@ -284,9 +328,6 @@ class TestGeneratorSeed(unittest.TestCase):
x2_np = x2.numpy()
x3_np = x3.numpy()
# print("## {}".format(x1_np))
# print("## {}".format(x2_np))
if not core.is_compiled_with_cuda():
print(">>>>>>> randperm dygraph >>>>>>>")
self.assertTrue(np.allclose(x1_np, x2_np))
......@@ -296,8 +337,7 @@ class TestGeneratorSeed(unittest.TestCase):
fluid.disable_dygraph()
gen = generator.Generator()
gen.manual_seed(123123143)
paddle.manual_seed(123123143)
startup_program = fluid.Program()
train_program = fluid.Program()
......@@ -312,8 +352,8 @@ class TestGeneratorSeed(unittest.TestCase):
out1 = exe.run(train_program,
feed={},
fetch_list=[result_1, result_2])
#gen.set_state(cur_state)
gen.manual_seed(123123143)
paddle.manual_seed(123123143)
out2 = exe.run(train_program,
feed={},
fetch_list=[result_1, result_2])
......@@ -331,7 +371,7 @@ class TestGeneratorSeed(unittest.TestCase):
def test_generator_sampling_id_dygraph(self):
"""Test Generator seed."""
gen = generator.Generator()
gen = paddle.manual_seed(12312321111)
fluid.enable_dygraph()
......@@ -339,14 +379,17 @@ class TestGeneratorSeed(unittest.TestCase):
x = fluid.layers.uniform_random(
[10, 10], dtype="float32", min=0.0, max=1.0)
y = fluid.layers.sampling_id(x)
st1 = gen.get_state()
x1 = fluid.layers.uniform_random(
[10, 10], dtype="float32", min=0.0, max=1.0)
y1 = fluid.layers.sampling_id(x)
gen.set_state(st1)
x2 = fluid.layers.uniform_random(
[10, 10], dtype="float32", min=0.0, max=1.0)
y2 = fluid.layers.sampling_id(x)
gen.manual_seed(12312321111)
x3 = fluid.layers.uniform_random(
[10, 10], dtype="float32", min=0.0, max=1.0)
......@@ -357,9 +400,6 @@ class TestGeneratorSeed(unittest.TestCase):
x2_np = y2.numpy()
x3_np = y3.numpy()
print("## {}".format(x1_np))
print("## {}".format(x2_np))
if not core.is_compiled_with_cuda():
print(">>>>>>> sampling id dygraph >>>>>>>")
self.assertTrue(np.allclose(x1_np, x2_np))
......@@ -369,8 +409,7 @@ class TestGeneratorSeed(unittest.TestCase):
fluid.disable_dygraph()
gen = generator.Generator()
gen.manual_seed(123123143)
paddle.manual_seed(123123143)
startup_program = fluid.Program()
train_program = fluid.Program()
......@@ -386,8 +425,8 @@ class TestGeneratorSeed(unittest.TestCase):
out1 = exe.run(train_program,
feed={},
fetch_list=[result_1, result_2])
#gen.set_state(cur_state)
gen.manual_seed(123123143)
paddle.manual_seed(123123143)
out2 = exe.run(train_program,
feed={},
fetch_list=[result_1, result_2])
......@@ -406,8 +445,7 @@ class TestGeneratorSeed(unittest.TestCase):
def test_gen_TruncatedNormal_initializer(self):
fluid.disable_dygraph()
gen = generator.Generator()
gen.manual_seed(123123143)
gen = paddle.manual_seed(123123143)
cur_state = gen.get_state()
startup_program = fluid.Program()
......@@ -432,9 +470,7 @@ class TestGeneratorSeed(unittest.TestCase):
out1 = exe.run(train_program,
feed={},
fetch_list=[result_1, result_2])
#gen.set_state(cur_state)
#gen.set_state(cur_state)
gen.manual_seed(123123143)
with fluid.program_guard(train_program, startup_program):
exe.run(startup_program)
......@@ -447,11 +483,6 @@ class TestGeneratorSeed(unittest.TestCase):
out2_res1 = np.array(out2[0])
out2_res2 = np.array(out2[1])
print(out1_res1)
print(out1_res2)
print(out2_res1)
print(out2_res2)
if not core.is_compiled_with_cuda():
print(">>>>>>> sampling id static >>>>>>>")
self.assertTrue(np.allclose(out1_res1, out2_res1))
......
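For the static-graph variants above, the determinism check has a fixed shape: build the program once, run it, reseed the generator with the same value, run the same program again, and compare the fetched arrays. A compact sketch of that pattern, assuming a CPU executor:

import numpy as np
import paddle
import paddle.fluid as fluid

fluid.disable_dygraph()
gen = paddle.manual_seed(123123143)

startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
    result_1 = fluid.layers.uniform_random(shape=[3, 4])
    result_2 = fluid.layers.uniform_random(shape=[3, 4])

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
out1 = exe.run(train_program, fetch_list=[result_1, result_2])
gen.manual_seed(123123143)           # reseed, then replay the same program
out2 = exe.run(train_program, fetch_list=[result_1, result_2])

# Same seed, same program: each fetched result repeats on CPU.
if not fluid.core.is_compiled_with_cuda():
    assert np.allclose(np.array(out1[0]), np.array(out2[0]))
    assert np.allclose(np.array(out1[1]), np.array(out2[1]))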
......@@ -169,9 +169,10 @@ class TestRegularizer(unittest.TestCase):
return param_sum
def check_l2decay_regularizer(self, place, model):
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
startup_prog.random_seed = 1
with self.scope_prog_guard(
main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
......@@ -188,9 +189,11 @@ class TestRegularizer(unittest.TestCase):
return param_sum
def check_l2decay(self, place, model):
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
startup_prog.random_seed = 1
with self.scope_prog_guard(
main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
......@@ -243,7 +246,8 @@ class TestRegularizer(unittest.TestCase):
with fluid.dygraph.guard():
input = fluid.dygraph.to_variable(
np.random.randn(3, 5).astype('float32'))
fluid.default_main_program().random_seed = 1
paddle.manual_seed(1)
paddle.framework.random._manual_program_seed(1)
linear1 = fluid.dygraph.Linear(
5, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr)
......
......@@ -211,7 +211,8 @@ def ffn(src, encoder_layer, ffn_fc1_act="relu"):
class TestTransformer(unittest.TestCase):
def test_multi_head_attention(self):
def multihead_attention_test_helper(self_attention, cache):
paddle.framework.manual_seed(2020)
paddle.manual_seed(2020)
paddle.framework.random._manual_program_seed(2020)
# self_attention|cross_attention, cache|No cache
with fluid.dygraph.guard(fluid.CPUPlace()):
......@@ -275,6 +276,7 @@ class TestTransformer(unittest.TestCase):
with fluid.dygraph.guard(fluid.CPUPlace()):
paddle.framework.manual_seed(2020)
paddle.framework.random._manual_program_seed(2020)
ffn_fc1_act = "relu"
# 1.generate basic params
......
......@@ -21,6 +21,7 @@ import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid.core as core
import paddle
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
......@@ -234,16 +235,16 @@ class TestUniformRandomOpSelectedRows(unittest.TestCase):
def check_with_place(self, place):
scope = core.Scope()
out = scope.var("X").get_selected_rows()
paddle.manual_seed(10)
op = Operator(
"uniform_random",
Out="X",
shape=[4, 784],
shape=[100, 784],
min=-5.0,
max=10.0,
seed=10)
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [4, 784])
self.assertEqual(out.get_tensor().shape(), [100, 784])
hist, prob = output_hist(np.array(out.get_tensor()))
self.assertTrue(
np.allclose(
......@@ -255,19 +256,19 @@ class TestUniformRandomOpSelectedRowsWithDiagInit(
def check_with_place(self, place):
scope = core.Scope()
out = scope.var("X").get_selected_rows()
paddle.manual_seed(10)
op = Operator(
"uniform_random",
Out="X",
shape=[4, 784],
shape=[100, 784],
min=-5.0,
max=10.0,
seed=10,
diag_num=4,
diag_num=100,
diag_step=784,
diag_val=1.0)
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [4, 784])
self.assertEqual(out.get_tensor().shape(), [100, 784])
hist, prob = output_hist_diag(np.array(out.get_tensor()))
self.assertTrue(
np.allclose(
......@@ -276,6 +277,7 @@ class TestUniformRandomOpSelectedRowsWithDiagInit(
class TestUniformRandomOpApi(unittest.TestCase):
def test_api(self):
paddle.manual_seed(10)
x = fluid.layers.data('x', shape=[16], dtype='float32', lod_level=1)
y = fluid.layers.fc(x,
size=16,
......@@ -347,12 +349,15 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase):
class TestUniformRandomOp_API_seed(unittest.TestCase):
def test_attr_tensor_API(self):
_seed = 10
gen = paddle.manual_seed(_seed)
gen._is_init_py = False
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
_min = 5
_max = 10
_seed = 10
ret = fluid.layers.nn.uniform_random(
[2, 3, 2], min=_min, max=_max, seed=_seed)
ret_2 = fluid.layers.nn.uniform_random(
......@@ -386,8 +391,8 @@ class TestUniformRandomOpSelectedRowsShapeTensor(unittest.TestCase):
scope = core.Scope()
out = scope.var("X").get_selected_rows()
shape_tensor = scope.var("Shape").get_tensor()
shape_tensor.set(np.array([4, 784]).astype("int64"), place)
shape_tensor.set(np.array([100, 784]).astype("int64"), place)
paddle.manual_seed(10)
op = Operator(
"uniform_random",
ShapeTensor="Shape",
......@@ -396,7 +401,7 @@ class TestUniformRandomOpSelectedRowsShapeTensor(unittest.TestCase):
max=10.0,
seed=10)
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [4, 784])
self.assertEqual(out.get_tensor().shape(), [100, 784])
hist, prob = output_hist(np.array(out.get_tensor()))
self.assertTrue(
np.allclose(
......@@ -418,10 +423,10 @@ class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase):
scope = core.Scope()
out = scope.var("X").get_selected_rows()
shape_1 = scope.var("shape1").get_tensor()
shape_1.set(np.array([4]).astype("int64"), place)
shape_1.set(np.array([100]).astype("int64"), place)
shape_2 = scope.var("shape2").get_tensor()
shape_2.set(np.array([784]).astype("int64"), place)
paddle.manual_seed(10)
op = Operator(
"uniform_random",
ShapeTensorList=["shape1", "shape2"],
......@@ -430,7 +435,7 @@ class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase):
max=10.0,
seed=10)
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [4, 784])
self.assertEqual(out.get_tensor().shape(), [100, 784])
hist, prob = output_hist(np.array(out.get_tensor()))
self.assertTrue(
np.allclose(
......@@ -455,21 +460,21 @@ class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase):
def test_Variable():
x1 = fluid.create_lod_tensor(
np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace())
np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace())
fluid.layers.uniform_random_batch_size_like(x1)
self.assertRaises(TypeError, test_Variable)
def test_shape():
x1 = fluid.layers.data(
name='x2', shape=[4, 784], dtype='float32')
name='x2', shape=[100, 784], dtype='float32')
fluid.layers.uniform_random_batch_size_like(x1, shape="shape")
self.assertRaises(TypeError, test_shape)
def test_dtype():
x2 = fluid.layers.data(
name='x2', shape=[4, 784], dtype='float32')
name='x2', shape=[100, 784], dtype='float32')
fluid.layers.uniform_random_batch_size_like(x2, 'int32')
self.assertRaises(TypeError, test_dtype)
......@@ -495,20 +500,20 @@ class TestUniformOpError(unittest.TestCase):
def test_Variable():
x1 = fluid.create_lod_tensor(
np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace())
np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace())
paddle.tensor.random.uniform(x1)
self.assertRaises(TypeError, test_Variable)
def test_Variable2():
x1 = np.zeros((4, 784))
x1 = np.zeros((100, 784))
paddle.tensor.random.uniform(x1)
self.assertRaises(TypeError, test_Variable2)
def test_dtype():
x2 = fluid.layers.data(
name='x2', shape=[4, 784], dtype='float32')
name='x2', shape=[100, 784], dtype='float32')
paddle.tensor.random.uniform(x2, 'int32')
self.assertRaises(TypeError, test_dtype)
......
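A few tests above set gen._is_init_py = False right after seeding. manual_seed marks the global generator as Python-initialized (see core.default_cpu_generator()._is_init_py = True in the random.py diff below), and clearing that flag makes random ops fall back to their legacy seeding path so the op-level seed attribute takes effect, which is what TestUniformRandomOp_API_seed relies on. A sketch of the opt-out, assuming the internal underscore-prefixed flag stays as shown in this PR:

import paddle
import paddle.fluid as fluid

gen = paddle.manual_seed(10)   # flips the generator's _is_init_py flag on
gen._is_init_py = False        # opt out: let each op honor its own seed attr

startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
    # With the flag cleared, the op-level seed below controls the draw.
    ret = fluid.layers.nn.uniform_random([2, 3, 2], min=5, max=10, seed=10)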
......@@ -15,6 +15,7 @@
from __future__ import print_function
import unittest
import paddle
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
import paddle.fluid.core as core
......
......@@ -14,28 +14,50 @@
# TODO: define random api
import paddle.fluid as fluid
from paddle.fluid import core
__all__ = ['manual_seed']
def manual_seed(seed):
"""
:alias_main: paddle.manual_seed
:alias: paddle.manual_seed,paddle.framework.random.manual_seed
Set global manual seed for program
Sets the seed for the global default generator, which manages random number generation.
Args:
manual_seed(int): random seed for program
seed(int): The random seed to set. It is recommended to set a large int number.
Returns:
None.
Generator: The global default generator object.
Examples:
.. code-block:: python
from paddle.framework import manual_seed
manual_seed(102)
import paddle
gen = paddle.manual_seed(102)
"""
#TODO(zhiqiu): 1. remove program.random_seed when all random-related op upgrade
# 2. support gpu generator by global device
seed = int(seed)
core.default_cpu_generator()._is_init_py = True
return core.default_cpu_generator().manual_seed(seed)
def _manual_program_seed(seed):
"""
Sets the global seed for generating random numbers.
NOTE(zhiqiu): This is the original implementation of manual_seed. Keep it temporarily,
since the CUDA generator is not developed yet and the unittests still need it.
Args:
seed(int): The random seed to set. It is recommended to set a large int number.
Returns:
None
"""
fluid.default_main_program().random_seed = seed
fluid.default_startup_program().random_seed = seed
......
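Note that _manual_program_seed is just the old two-line program seeding wrapped in a helper, so during the transition the two spellings below are interchangeable (sketch against the default programs):

import paddle
import paddle.fluid as fluid

# Helper form used throughout the updated tests:
paddle.framework.random._manual_program_seed(333)

# Equivalent direct form it replaces:
fluid.default_main_program().random_seed = 333
fluid.default_startup_program().random_seed = 333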
......@@ -22,6 +22,7 @@ import numpy as np
import shutil
import tempfile
import paddle
from paddle import fluid
from paddle.nn import Conv2d, Pool2D, Linear, ReLU, Sequential, Softmax
from paddle.fluid.dygraph.base import to_variable
......@@ -170,8 +171,8 @@ class TestModel(unittest.TestCase):
cls.test_dataset, places=cls.device, batch_size=64)
seed = 333
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
dy_lenet = LeNetDygraph()
cls.init_param = dy_lenet.state_dict()
......@@ -222,8 +223,8 @@ class TestModel(unittest.TestCase):
def fit(self, dynamic, num_replicas=None, rank=None):
fluid.enable_dygraph(self.device) if dynamic else None
seed = 333
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
net = LeNet(classifier_activation=None)
optim_new = fluid.optimizer.Adam(
......@@ -327,8 +328,8 @@ class MyModel(fluid.dygraph.Layer):
class TestModelFunction(unittest.TestCase):
def set_seed(self, seed=1024):
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
def test_train_batch(self, dynamic=True):
dim = 20
......
......@@ -20,6 +20,7 @@ import random
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import Embedding, Linear, Layer
from paddle.fluid.layers import BeamSearchDecoder
......@@ -87,15 +88,18 @@ class ModuleApiTest(unittest.TestCase):
fluid.enable_dygraph(place)
else:
fluid.disable_dygraph()
fluid.default_main_program().random_seed = self._random_seed
fluid.default_startup_program().random_seed = self._random_seed
layer = self.model_cls(**self.attrs) if isinstance(
self.attrs, dict) else self.model_cls(*self.attrs)
model = Model(layer, inputs=self.make_inputs())
model.prepare()
if self.param_states:
model.load(self.param_states, optim_state=None)
return model.test_batch(self.inputs)
gen = paddle.manual_seed(self._random_seed)
gen._is_init_py = False
paddle.framework.random._manual_program_seed(self._random_seed)
scope = fluid.core.Scope()
with fluid.scope_guard(scope):
layer = self.model_cls(**self.attrs) if isinstance(
self.attrs, dict) else self.model_cls(*self.attrs)
model = Model(layer, inputs=self.make_inputs())
model.prepare()
if self.param_states:
model.load(self.param_states, optim_state=None)
return model.test_batch(self.inputs)
def check_output_with_place(self, place, mode="test"):
dygraph_output = self._calc_output(place, mode, dygraph=True)
......@@ -129,12 +133,9 @@ class TestBasicLSTM(ModuleApiTest):
@staticmethod
def model_init(model, input_size, hidden_size):
model.lstm = RNN(
BasicLSTMCell(
input_size,
hidden_size,
param_attr=fluid.ParamAttr(name="lstm_weight"),
bias_attr=fluid.ParamAttr(name="lstm_bias")))
model.lstm = RNN(BasicLSTMCell(
input_size,
hidden_size, ))
@staticmethod
def model_forward(model, inputs):
......