diff --git a/paddle/fluid/operators/optimizers/adadelta_op.cc b/paddle/fluid/operators/optimizers/adadelta_op.cc
index dd365629fccd3df524a55cc272ef6da7a9a6a373..01c0f1bb2d4778c3ba4980b9e7d4faef77901c0b 100644
--- a/paddle/fluid/operators/optimizers/adadelta_op.cc
+++ b/paddle/fluid/operators/optimizers/adadelta_op.cc
@@ -56,6 +56,11 @@ class AdadeltaOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         param_dim, ctx->GetInputDim("Grad"),
         "param and grad input of AdadeltaOp should have same dimension");
+    PADDLE_ENFORCE_NE(framework::product(ctx->GetInputDim("AvgSquaredGrad")), 0,
+                      "Maybe the Input variable AvgSquaredGrad has not "
+                      "been initialized. You may need to confirm if you put "
+                      "exe.run(startup_program) after optimizer.minimize "
+                      "function.");
     PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("AvgSquaredGrad"),
                       "Param and AvgSquaredGrad input of AdadeltaOp "
                       "should have same dimension");
diff --git a/paddle/fluid/operators/optimizers/adagrad_op.cc b/paddle/fluid/operators/optimizers/adagrad_op.cc
index bd1bb98e63892afdec2c25448d0a83cea8064d54..0310fe2eba8e9fcd02ac6c229f90a1d75ddea63e 100644
--- a/paddle/fluid/operators/optimizers/adagrad_op.cc
+++ b/paddle/fluid/operators/optimizers/adagrad_op.cc
@@ -44,6 +44,11 @@ class AdagradOp : public framework::OperatorWithKernel {
                    "Output(MomentOut) of AdagradOp should not be null.");
 
     auto lr_dims = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
+                      "Maybe the Input variable LearningRate has not "
+                      "been initialized. You may need to confirm "
+                      "if you put exe.run(startup_program) "
+                      "after optimizer.minimize function.");
     PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
                       "LearningRate should have one element");
     auto param_dims = ctx->GetInputDim("Param");
diff --git a/paddle/fluid/operators/optimizers/adam_op.cc b/paddle/fluid/operators/optimizers/adam_op.cc
index dd347aa0afebe5c75e7f3b574083783b4454fd20..fc851e56cbfd2ab6780a3c812309bced2b693acd 100644
--- a/paddle/fluid/operators/optimizers/adam_op.cc
+++ b/paddle/fluid/operators/optimizers/adam_op.cc
@@ -43,6 +43,11 @@ void AdamOp::InferShape(framework::InferShapeContext* ctx) const {
                  "Output(Moment2Out) of AdamOp should not be null.");
 
   auto lr_dims = ctx->GetInputDim("LearningRate");
+  PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
+                    "Maybe the Input variable LearningRate has not "
+                    "been initialized. You may need to confirm "
+                    "if you put exe.run(startup_program) "
+                    "after optimizer.minimize function.");
   PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
                     "Learning rate should have 1 dimension");
   auto beta1_pow_dims = ctx->GetInputDim("Beta1Pow");
diff --git a/paddle/fluid/operators/optimizers/adamax_op.cc b/paddle/fluid/operators/optimizers/adamax_op.cc
index aef1fc972c00f35f8a74791c94098e449e4dcf31..a0152906235cbc8a870a05da990409f661338f6e 100644
--- a/paddle/fluid/operators/optimizers/adamax_op.cc
+++ b/paddle/fluid/operators/optimizers/adamax_op.cc
@@ -54,6 +54,11 @@ class AdamaxOp : public framework::OperatorWithKernel {
                    "Output(InfNormOut) of AdamaxOp should not be null.");
 
     auto lr_dims = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
+                      "Maybe the Input variable LearningRate has not "
+                      "been initialized. You may need to confirm "
+                      "if you put exe.run(startup_program) "
+                      "after optimizer.minimize function.");
     PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
                       "Learning rate should have 1 dimension");
     auto beta1_pow_dims = ctx->GetInputDim("Beta1Pow");
diff --git a/paddle/fluid/operators/optimizers/decayed_adagrad_op.cc b/paddle/fluid/operators/optimizers/decayed_adagrad_op.cc
index 07899278f9ed4d774a308a8db162f056f5107868..b44a84ccf71b574663ba5e425c4537d3769fdffe 100644
--- a/paddle/fluid/operators/optimizers/decayed_adagrad_op.cc
+++ b/paddle/fluid/operators/optimizers/decayed_adagrad_op.cc
@@ -49,6 +49,11 @@ class DecayedAdagradOp : public framework::OperatorWithKernel {
                    "Output(MomentOut) of DecayedAdagradOp should not be null.");
 
     auto lr_dims = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
+                      "Maybe the Input variable LearningRate has not "
+                      "been initialized. You may need to confirm "
+                      "if you put exe.run(startup_program) "
+                      "after optimizer.minimize function.");
     PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
                       "LearningRate should have one element");
     auto param_dims = ctx->GetInputDim("Param");
diff --git a/paddle/fluid/operators/optimizers/ftrl_op.cc b/paddle/fluid/operators/optimizers/ftrl_op.cc
index c1a4f5790bf7feb596edb84e58f2d8288eb29ce2..98b71175624e77bf3ea1d402b9ab13c84d93c8a5 100644
--- a/paddle/fluid/operators/optimizers/ftrl_op.cc
+++ b/paddle/fluid/operators/optimizers/ftrl_op.cc
@@ -57,6 +57,11 @@ class FTRLOp : public framework::OperatorWithKernel {
                       "Two input of FTRL Op's dimension must be same.");
 
     auto lr_dim = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_NE(framework::product(lr_dim), 0,
+                      "Maybe the Input variable LearningRate has not "
+                      "been initialized. You may need to confirm "
+                      "if you put exe.run(startup_program) "
+                      "after optimizer.minimize function.");
     PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1,
                       "Learning Rate should be a scalar.");
 
diff --git a/paddle/fluid/operators/optimizers/momentum_op.h b/paddle/fluid/operators/optimizers/momentum_op.h
index 29a2ae6755aa609e4a6ee43bbf11fe02ebfa654e..f56f5b6bbe372f9e38b93f00c89fa99d5a58544a 100644
--- a/paddle/fluid/operators/optimizers/momentum_op.h
+++ b/paddle/fluid/operators/optimizers/momentum_op.h
@@ -54,6 +54,15 @@ class MomentumOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("VelocityOut"),
                    "Output(VelocityOut) of Momentum should not be null.");
 
+    auto lr_dims = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
+                      "Maybe the Input variable LearningRate has not "
+                      "been initialized. You may need to confirm "
+                      "if you put exe.run(startup_program) "
+                      "after optimizer.minimize function.");
+    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
+                      "Learning_rate should be a scalar");
+
     auto param_dim = ctx->GetInputDim("Param");
     if (ctx->GetInputsVarType("Grad")[0] ==
         framework::proto::VarType::LOD_TENSOR) {
@@ -64,8 +73,6 @@ class MomentumOp : public framework::OperatorWithKernel {
           param_dim, ctx->GetInputDim("Velocity"),
           "Param and Velocity of MomentumOp should have the same dimension.");
     }
-    PADDLE_ENFORCE_EQ(framework::product(ctx->GetInputDim("LearningRate")), 1,
-                      "Learning_rate should be a scalar");
 
     ctx->SetOutputDim("ParamOut", param_dim);
     ctx->SetOutputDim("VelocityOut", param_dim);
diff --git a/paddle/fluid/operators/optimizers/sgd_op.cc b/paddle/fluid/operators/optimizers/sgd_op.cc
index 62163e45c84771602208f2793e15b5854332af12..9ccf3d9364635ad0bc4423d09776e5a8f253993c 100644
--- a/paddle/fluid/operators/optimizers/sgd_op.cc
+++ b/paddle/fluid/operators/optimizers/sgd_op.cc
@@ -32,6 +32,11 @@ class SGDOp : public framework::OperatorWithKernel {
                    "Output(ParamOut) of SGDOp should not be null.");
 
     auto lr_dims = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
+                      "Maybe the Input variable LearningRate has not "
+                      "been initialized. You may need to confirm "
+                      "if you put exe.run(startup_program) "
+                      "after optimizer.minimize function.");
     PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
                       "Learning rate should have 1 element");
     auto param_dim = ctx->GetInputDim("Param");
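
Every hunk in this patch inlines the same five-line guard: framework::product of an uninitialized variable's dims is 0, so checking NE 0 before the existing EQ 1 scalar check replaces a confusing shape mismatch with an actionable hint about running the startup program. As a minimal sketch only, and not something this patch adds, the repeated guard could be factored into one file-local helper; the name EnforceTensorInitialized and the free-function form are assumptions, while framework::DDim, framework::product, and PADDLE_ENFORCE_NE are the same APIs the hunks above already use:

// Hypothetical helper, not part of this patch: one definition of the
// guard that every optimizer op above currently repeats inline.
#include <string>

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace operators {

// product(dims) == 0 means the tensor behind this input was never
// allocated, i.e. the startup program has not been run yet.
inline void EnforceTensorInitialized(const framework::DDim& dims,
                                     const std::string& name) {
  PADDLE_ENFORCE_NE(framework::product(dims), 0,
                    "Maybe the Input variable %s has not been "
                    "initialized. You may need to confirm if you put "
                    "exe.run(startup_program) after optimizer.minimize "
                    "function.",
                    name);
}

}  // namespace operators
}  // namespace paddle

Each InferShape would then call EnforceTensorInitialized(ctx->GetInputDim("LearningRate"), "LearningRate") ahead of its existing scalar check, keeping the message wording in one place.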