diff --git a/dygraph/cycle_gan/train.py b/dygraph/cycle_gan/train.py
index 5147d6324e246aff736c5a79d7a753fc44223cf4..a1422047b0d02f5e6cd9dfaa97e5840d38a7bf69 100644
--- a/dygraph/cycle_gan/train.py
+++ b/dygraph/cycle_gan/train.py
@@ -47,7 +47,7 @@ lambda_identity = 0.5
 step_per_epoch = 2974


-def optimizer_setting():
+def optimizer_setting(parameters):
     lr = 0.0002
     optimizer = fluid.optimizer.Adam(
         learning_rate=fluid.layers.piecewise_decay(
@@ -56,6 +56,7 @@ def optimizer_setting():
                 140 * step_per_epoch, 160 * step_per_epoch, 180 * step_per_epoch
             ],
             values=[lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1]),
+        parameter_list=parameters,
         beta1=0.5)
     return optimizer

@@ -88,9 +89,14 @@ def train(args):
         losses = [[], []]
         t_time = 0

-        optimizer1 = optimizer_setting()
-        optimizer2 = optimizer_setting()
-        optimizer3 = optimizer_setting()
+
+        vars_G = cycle_gan.build_generator_resnet_9blocks_a.parameters() + cycle_gan.build_generator_resnet_9blocks_b.parameters()
+        vars_da = cycle_gan.build_gen_discriminator_a.parameters()
+        vars_db = cycle_gan.build_gen_discriminator_b.parameters()
+
+        optimizer1 = optimizer_setting(vars_G)
+        optimizer2 = optimizer_setting(vars_da)
+        optimizer3 = optimizer_setting(vars_db)

         for epoch in range(args.epoch):
             batch_id = 0
@@ -114,9 +120,8 @@ def train(args):
                 g_loss_out = g_loss.numpy()

                 g_loss.backward()
-                vars_G = cycle_gan.build_generator_resnet_9blocks_a.parameters() + cycle_gan.build_generator_resnet_9blocks_b.parameters()
-                optimizer1.minimize(g_loss, parameter_list=vars_G)
+                optimizer1.minimize(g_loss)
                 cycle_gan.clear_gradients()

                 fake_pool_B = B_pool.pool_image(fake_B).numpy()

@@ -137,8 +142,7 @@ def train(args):
                 d_loss_A = fluid.layers.reduce_mean(d_loss_A)

                 d_loss_A.backward()
-                vars_da = cycle_gan.build_gen_discriminator_a.parameters()
-                optimizer2.minimize(d_loss_A, parameter_list=vars_da)
+                optimizer2.minimize(d_loss_A)
                 cycle_gan.clear_gradients()

                 # optimize the d_B network
@@ -150,8 +154,7 @@ def train(args):
                 d_loss_B = fluid.layers.reduce_mean(d_loss_B)

                 d_loss_B.backward()
-                vars_db = cycle_gan.build_gen_discriminator_b.parameters()
-                optimizer3.minimize(d_loss_B, parameter_list=vars_db)
+                optimizer3.minimize(d_loss_B)
                 cycle_gan.clear_gradients()


diff --git a/dygraph/mnist/train.py b/dygraph/mnist/train.py
index 0398913bc14b5e0679a5038a527a18887c9cbe3f..bb0568c08161afab84a287a53f7cf579fb1f92fb 100644
--- a/dygraph/mnist/train.py
+++ b/dygraph/mnist/train.py
@@ -187,7 +187,7 @@ def train_mnist(args):
         if args.use_data_parallel:
             strategy = fluid.dygraph.parallel.prepare_context()
         mnist = MNIST()
-        adam = AdamOptimizer(learning_rate=0.001)
+        adam = AdamOptimizer(learning_rate=0.001, parameter_list=mnist.parameters())
         if args.use_data_parallel:
             mnist = fluid.dygraph.parallel.DataParallel(mnist, strategy)

diff --git a/dygraph/reinforcement_learning/actor_critic.py b/dygraph/reinforcement_learning/actor_critic.py
index 7fce62abe10980ed37e2ad5848f369b16ef193a1..26ff614a588bc5f8206377c1862993312d335120 100644
--- a/dygraph/reinforcement_learning/actor_critic.py
+++ b/dygraph/reinforcement_learning/actor_critic.py
@@ -68,7 +68,7 @@ with fluid.dygraph.guard():
     policy = Policy()

     eps = np.finfo(np.float32).eps.item()
-    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=3e-2)
+    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=3e-2, parameter_list=policy.parameters())

     def get_mean_and_std(values=[]):
         n = 0.
diff --git a/dygraph/reinforcement_learning/reinforce.py b/dygraph/reinforcement_learning/reinforce.py
index d9d6a15e20225e017753dc30a568de961242e288..e7f4d7e56a6608a8d5027459746806639b06eac7 100644
--- a/dygraph/reinforcement_learning/reinforce.py
+++ b/dygraph/reinforcement_learning/reinforce.py
@@ -67,7 +67,7 @@ with fluid.dygraph.guard():
     policy = Policy()

     eps = np.finfo(np.float32).eps.item()
-    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-2)
+    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-2, parameter_list=policy.parameters())

     def get_mean_and_std(values=[]):
         n = 0.
diff --git a/dygraph/reinforcement_learning/test_actor_critic_load.py b/dygraph/reinforcement_learning/test_actor_critic_load.py
index c58b9495f2605cdd6b9749db542bbab3c1e3eba7..21699bef7669c33389d52c71545836d7e0e82dee 100644
--- a/dygraph/reinforcement_learning/test_actor_critic_load.py
+++ b/dygraph/reinforcement_learning/test_actor_critic_load.py
@@ -68,7 +68,7 @@ with fluid.dygraph.guard():
     policy = Policy()

     eps = np.finfo(np.float32).eps.item()
-    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=3e-2)
+    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=3e-2, parameter_list=policy.parameters())

     def get_mean_and_std(values=[]):
         n = 0.
diff --git a/dygraph/reinforcement_learning/test_reinforce_load.py b/dygraph/reinforcement_learning/test_reinforce_load.py
index 9d8aa5dc68eb7d8c5e155c1b6f0762c9b2a00a3b..31edd66b0f8e4dea8907af71c117fb62dfb6ae42 100644
--- a/dygraph/reinforcement_learning/test_reinforce_load.py
+++ b/dygraph/reinforcement_learning/test_reinforce_load.py
@@ -67,7 +67,7 @@ with fluid.dygraph.guard():
     policy = Policy()

     eps = np.finfo(np.float32).eps.item()
-    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-2)
+    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-2, parameter_list=policy.parameters())

     def get_mean_and_std(values=[]):
         n = 0.
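Every hunk above applies the same dygraph API migration: the trainable parameters are handed to the optimizer at construction time via parameter_list, and minimize() is then called with the loss only (it no longer receives parameter_list). Below is a minimal, self-contained sketch of the new calling convention, not part of the diff; the Linear(10, 1) layer and the random input are illustrative stand-ins for the MNIST/Policy/CycleGAN networks, assuming a Paddle 1.x release that provides fluid.dygraph.Linear.

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    # Hypothetical stand-in model for the networks patched above.
    model = fluid.dygraph.Linear(10, 1)
    adam = fluid.optimizer.AdamOptimizer(
        learning_rate=0.001,
        parameter_list=model.parameters())  # new: bind parameters when creating the optimizer

    x = fluid.dygraph.to_variable(np.random.rand(4, 10).astype('float32'))
    loss = fluid.layers.reduce_mean(model(x))
    loss.backward()
    adam.minimize(loss)                     # no parameter_list argument here any more
    model.clear_gradients()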