Unverified commit 621a4085, authored by zhang wenhui, committed via GitHub

enhance cvm bpr_loss adam adagrad adamax ftrl error message, test=develop (#24452)

Parent e5861929
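The change applies one pattern throughout the operators below: input/output existence checks move from bare-message PADDLE_ENFORCE calls to OP_INOUT_CHECK, and value/shape checks attach a typed platform::errors::InvalidArgument payload instead of a raw format string. The following is a minimal sketch of an InferShape body written in that style, shown only for orientation; it is not part of the patch, and the "Example" op with its "X"/"Out" variables is hypothetical.

// Illustrative sketch only (hypothetical "ExampleOp"); assumes the usual
// Paddle operator headers, e.g. op_registry.h, are available as in the
// operator files touched by this commit.
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

class ExampleOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    // Existence checks: OP_INOUT_CHECK emits a uniform message that names
    // the missing variable and the op, so no hand-written string is needed.
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Example");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Example");

    // Shape/value checks: wrap the message in a typed error so failures are
    // reported as InvalidArgument with a descriptive, formatted message.
    auto x_dims = ctx->GetInputDim("X");
    PADDLE_ENFORCE_EQ(x_dims.size(), 2,
                      platform::errors::InvalidArgument(
                          "Input(X)'s rank should be 2, but received %d.",
                          x_dims.size()));

    ctx->SetOutputDim("Out", x_dims);
  }
};

}  // namespace operators
}  // namespace paddle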
@@ -23,22 +23,26 @@ class BprLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BprLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "BprLoss");
+    OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "BprLoss");
     auto x_dims = ctx->GetInputDim("X");
     auto label_dims = ctx->GetInputDim("Label");
     int rank = x_dims.size();
-    PADDLE_ENFORCE_EQ(rank, label_dims.size(),
-                      "Input(X) and Input(Label) shall have the same rank.");
+    PADDLE_ENFORCE_EQ(
+        rank, label_dims.size(),
+        platform::errors::InvalidArgument(
+            "Input(X) and Input(Label) shall have the same rank."));
     if (ctx->IsRuntime() || (framework::product(x_dims) > 0 &&
                              framework::product(label_dims) > 0)) {
-      PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
-                        framework::slice_ddim(label_dims, 0, rank - 1),
-                        "Input(X) and Input(Label) shall have the same shape "
-                        "except the last dimension.");
+      PADDLE_ENFORCE_EQ(
+          framework::slice_ddim(x_dims, 0, rank - 1),
+          framework::slice_ddim(label_dims, 0, rank - 1),
+          platform::errors::InvalidArgument(
+              "Input(X) and Input(Label) shall have the same shape "
+              "except the last dimension."));
     }
     auto y_dims = x_dims;

@@ -63,33 +67,41 @@ class BprLossGradientOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
-                   "Input(Y@GRAD) shoudl be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Output(X@GRAD) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BprLossGradient");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "BprLossGradient");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
+                   framework::GradVarName("Y"), "BprLossGradient");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   framework::GradVarName("X"), "BprLossGradient");
     auto x_dims = ctx->GetInputDim("X");
     auto label_dims = ctx->GetInputDim("Label");
     auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
     int rank = x_dims.size();
-    PADDLE_ENFORCE_EQ(dy_dims.size(), rank,
-                      "Input(Y@Grad) and Input(X) should have the same rank.");
-    PADDLE_ENFORCE_EQ(label_dims.size(), rank,
-                      "Input(Label) and Input(X) should have the same rank.");
+    PADDLE_ENFORCE_EQ(
+        dy_dims.size(), rank,
+        platform::errors::InvalidArgument(
+            "Input(Y@Grad) and Input(X) should have the same rank."));
+    PADDLE_ENFORCE_EQ(
+        label_dims.size(), rank,
+        platform::errors::InvalidArgument(
+            "Input(Label) and Input(X) should have the same rank."));
-    PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
-                      framework::slice_ddim(label_dims, 0, rank - 1),
-                      "The Input(X) and Input(Label) should have the same "
-                      "shape except the last dimension.");
+    PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
+                      framework::slice_ddim(label_dims, 0, rank - 1),
+                      platform::errors::InvalidArgument(
+                          "The Input(X) and Input(Label) should have the same "
+                          "shape except the last dimension."));
-    PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
-                      framework::slice_ddim(dy_dims, 0, rank - 1),
-                      "The Input(X) and Input(Y@Grad) should have the same "
-                      "shape except the last dimension.");
+    PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
+                      framework::slice_ddim(dy_dims, 0, rank - 1),
+                      platform::errors::InvalidArgument(
+                          "The Input(X) and Input(Y@Grad) should have the same "
+                          "shape except the last dimension."));
-    PADDLE_ENFORCE_EQ(dy_dims[rank - 1], 1,
-                      "The last dimension of Input(Y@Grad) should be 1.");
+    PADDLE_ENFORCE_EQ(dy_dims[rank - 1], 1,
+                      platform::errors::InvalidArgument(
+                          "The last dimension of Input(Y@Grad) should be 1."));
-    PADDLE_ENFORCE_EQ(label_dims[rank - 1], 1,
-                      " the last dimension of Input(Label) should be 1.");
+    PADDLE_ENFORCE_EQ(label_dims[rank - 1], 1,
+                      platform::errors::InvalidArgument(
+                          " the last dimension of Input(Label) should be 1."));
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     ctx->ShareLoD("X", framework::GradVarName("X"));
   }
@@ -28,7 +28,6 @@ using Tensor = framework::Tensor;
 template <typename T>
 struct TolerableValue {
   HOSTDEVICE T operator()(const T& x) const {
-    PADDLE_ENFORCE_EQ(std::is_floating_point<T>::value, true);
     const T kApproInf = 1e20;
     if (x == INFINITY) return kApproInf;
     if (x == -INFINITY) return -kApproInf;

@@ -62,8 +61,11 @@ class BprLossOpKernel : public framework::OpKernel<T> {
     const int64_t* label_data = labels->data<int64_t>();
     for (int i = 0; i < step_size; ++i) {
       int lbl_pos = label_data[i];
-      PADDLE_ENFORCE_GE(lbl_pos, 0);
-      PADDLE_ENFORCE_LT(lbl_pos, class_num);
+      PADDLE_ENFORCE_GE(lbl_pos, 0, platform::errors::InvalidArgument(
+                                        "label data %d is illegal.", lbl_pos));
+      PADDLE_ENFORCE_LT(lbl_pos, class_num,
+                        platform::errors::InvalidArgument(
+                            "label data %d is illegal.", lbl_pos));
       int index_pos = i * class_num + lbl_pos;
       T sum = static_cast<T>(0);
       for (int j = 0; j < class_num; j++) {
@@ -26,17 +26,20 @@ class CVMOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("CVM"), "Input(CVM) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CVM");
+    OP_INOUT_CHECK(ctx->HasInput("CVM"), "Input", "CVM", "CVM");
+    OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "CVM");
     auto x_dims = ctx->GetInputDim("X");
     auto cvm_dims = ctx->GetInputDim("CVM");
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input(X)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(cvm_dims.size(), 2UL, "Input(CVM)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(cvm_dims[1], 2UL,
-                      "The 2nd dimension of "
-                      "Input(CVM) should be 2.");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, platform::errors::InvalidArgument(
+                                              "Input(X)'s rank should be 2."));
+    PADDLE_ENFORCE_EQ(
+        cvm_dims.size(), 2UL,
+        platform::errors::InvalidArgument("Input(CVM)'s rank should be 2."));
+    PADDLE_ENFORCE_EQ(cvm_dims[1], 2UL, platform::errors::InvalidArgument(
+                                            "The 2nd dimension of "
+                                            "Input(CVM) should be 2."));
     if (ctx->Attrs().Get<bool>("use_cvm")) {
       ctx->SetOutputDim("Y", {x_dims[0], x_dims[1]});

@@ -63,27 +66,36 @@ class CVMGradientOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("CVM"), "Input(CVM) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
-                   "Input(Y@GRAD) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Output(X@GRAD) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CVMGradient");
+    OP_INOUT_CHECK(ctx->HasInput("CVM"), "Input", "CVM", "CVMGradient");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
+                   framework::GradVarName("Y"), "CVMGradient");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   framework::GradVarName("X"), "CVMGradient");
     auto x_dims = ctx->GetInputDim("X");
     auto cvm_dims = ctx->GetInputDim("CVM");
     auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(dy_dims.size(), 2, "Input(Y@Grad)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(cvm_dims.size(), 2, "Input(CVM)'s rank should be 2.");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
+                                            "Input(X)'s rank should be 2."));
+    PADDLE_ENFORCE_EQ(
+        dy_dims.size(), 2,
+        platform::errors::InvalidArgument("Input(Y@Grad)'s rank should be 2."));
+    PADDLE_ENFORCE_EQ(
+        cvm_dims.size(), 2,
+        platform::errors::InvalidArgument("Input(CVM)'s rank should be 2."));
-    PADDLE_ENFORCE_EQ(x_dims[0], dy_dims[0],
-                      "The 1st dimension of Input(X) and Input(Y@Grad) should "
-                      "be equal.");
+    PADDLE_ENFORCE_EQ(
+        x_dims[0], dy_dims[0],
+        platform::errors::InvalidArgument(
+            "The 1st dimension of Input(X) and Input(Y@Grad) should "
+            "be equal."));
-    PADDLE_ENFORCE_EQ(cvm_dims[1], 2,
-                      "When Attr(soft_label) == false, the 2nd dimension of "
-                      "Input(CVM) should be 2.");
+    PADDLE_ENFORCE_EQ(
+        cvm_dims[1], 2,
+        platform::errors::InvalidArgument(
+            "When Attr(soft_label) == false, the 2nd dimension of "
+            "Input(CVM) should be 2."));
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     ctx->ShareLoD("X", framework::GradVarName("X"));
   }
@@ -29,35 +29,34 @@ class AdagradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Param"),
-                   "Input(Param) of AdagradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Grad"),
-                   "Input(Grad) of AdagradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Moment"),
-                   "Input(Moment) of AdagradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
-                   "Input(LearningRate) of AdagradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
-                   "Output(ParamOut) of AdagradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("MomentOut"),
-                   "Output(MomentOut) of AdagradOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "Adagrad");
+    OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "Adagrad");
+    OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment", "Adagrad");
+    OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate",
+                   "Adagrad");
+    OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "Adagrad");
+    OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut",
+                   "Adagrad");
     auto lr_dims = ctx->GetInputDim("LearningRate");
-    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
-                      "Maybe the Input variable LearningRate has not "
-                      "been initialized. You may need to confirm "
-                      "if you put exe.run(startup_program) "
-                      "after optimizer.minimize function.");
-    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
-                      "LearningRate should have one element");
+    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
+                      platform::errors::InvalidArgument(
+                          "Maybe the Input variable LearningRate has not "
+                          "been initialized. You may need to confirm "
+                          "if you put exe.run(startup_program) "
+                          "after optimizer.minimize function."));
+    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
+                      platform::errors::InvalidArgument(
+                          "LearningRate should have one element"));
     auto param_dims = ctx->GetInputDim("Param");
-    PADDLE_ENFORCE_EQ(
-        param_dims, ctx->GetInputDim("Grad"),
-        "Param and Grad input of AdagradOp should have the same dimension.");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("Grad"),
+        platform::errors::InvalidArgument("Param and Grad input of AdagradOp "
+                                          "should have the same dimension."));
-    PADDLE_ENFORCE_EQ(
-        param_dims, ctx->GetInputDim("Moment"),
-        "Param and Moment input of AdagradOp should have the same dimension.");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("Moment"),
+        platform::errors::InvalidArgument("Param and Moment input of AdagradOp "
+                                          "should have the same dimension."));
     ctx->SetOutputDim("ParamOut", param_dims);
     ctx->SetOutputDim("MomentOut", param_dims);
@@ -47,11 +47,12 @@ class AdagradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
     const auto *param_var = ctx.InputVar("Param");
-    PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
-                   "The Var(%s)'s type should be LoDTensor, "
-                   "but the received is %s",
-                   ctx.InputNames("Param").front(),
-                   framework::ToTypeName(param_var->Type()));
+    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Param").front(),
+                          framework::ToTypeName(param_var->Type())));
     auto *param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
     auto *moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut");

@@ -89,10 +90,14 @@ class AdagradOpKernel : public framework::OpKernel<T> {
       }
     } else if (grad_var->IsType<framework::SelectedRows>()) {
       auto *param_tensor = ctx.Input<framework::Tensor>("Param");
-      PADDLE_ENFORCE_EQ(param_tensor, param_out_tensor);
+      PADDLE_ENFORCE_EQ(param_tensor, param_out_tensor,
+                        platform::errors::InvalidArgument(
+                            "the input tensor not euqal with output tensor"));
       auto *moment_tensor = ctx.Input<framework::Tensor>("Moment");
-      PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor);
+      PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor,
+                        platform::errors::InvalidArgument(
+                            "the input moment not eual with output moment"));
       SparseAdagradFunctor<DeviceContext, T> functor;
       functor(ctx.template device_context<DeviceContext>(),

@@ -100,7 +105,8 @@ class AdagradOpKernel : public framework::OpKernel<T> {
               *ctx.Input<framework::Tensor>("LearningRate"), epsilon,
               moment_out_tensor, param_out_tensor);
     } else {
-      PADDLE_THROW("Unsupported Variable Type of Grad");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Unsupported Variable Type of Grad"));
     }
   }
 };
@@ -376,11 +376,12 @@ class AdamOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     const auto* param_var = ctx.InputVar("Param");
-    PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
-                   "The Var(%s)'s type should be LoDTensor, "
-                   "but the received is %s",
-                   ctx.InputNames("Param").front(),
-                   framework::ToTypeName(param_var->Type()));
+    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Param").front(),
+                          framework::ToTypeName(param_var->Type())));
     using paddle::framework::LoDTensor;

@@ -572,7 +573,8 @@ class AdamOpKernel : public framework::OpKernel<T> {
         functor(param->numel());
       }
     } else {
-      PADDLE_THROW("Variable type not supported by adam_op");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Variable type not supported by adam_op"));
     }
   }
 };
@@ -23,57 +23,61 @@ class AdamaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Param"),
-                   "Input(Param) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Grad"),
-                   "Input(Grad) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Moment"),
-                   "Input(Moment) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("InfNorm"),
-                   "Input(InfNorm) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
-                   "Input(LearningRate) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Beta1Pow"),
-                   "Input(Beta1Pow) of AdamaxOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "Adamax");
+    OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "Adamax");
+    OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment", "Adamax");
+    OP_INOUT_CHECK(ctx->HasInput("InfNorm"), "Input", "InfNorm", "Adamax");
+    OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate",
+                   "Adamax");
+    OP_INOUT_CHECK(ctx->HasInput("Beta1Pow"), "Input", "Beta1Pow", "Adamax");
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Param").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s",
-        ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front());
+    PADDLE_ENFORCE_EQ(
+        ctx->GetInputsVarType("Param").front(),
+        framework::proto::VarType::LOD_TENSOR,
+        platform::errors::InvalidArgument(
+            "The input var's type should be LoDTensor, but the received is %s",
+            ctx->Inputs("Param").front(),
+            ctx->GetInputsVarType("Param").front()));
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Grad").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s",
-        ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front());
+    PADDLE_ENFORCE_EQ(
+        ctx->GetInputsVarType("Grad").front(),
+        framework::proto::VarType::LOD_TENSOR,
+        platform::errors::InvalidArgument(
+            "The input var's type should be LoDTensor, but the received is %s",
+            ctx->Inputs("Grad").front(),
+            ctx->GetInputsVarType("Grad").front()));
-    PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
-                   "Output(ParamOut) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("MomentOut"),
-                   "Output(MomentOut) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"),
-                   "Output(InfNormOut) of AdamaxOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "Adamax");
+    OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut",
+                   "Adamax");
+    OP_INOUT_CHECK(ctx->HasOutput("InfNormOut"), "Output", "InfNormOut",
+                   "Adamax");
     auto lr_dims = ctx->GetInputDim("LearningRate");
-    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
-                      "Maybe the Input variable LearningRate has not "
-                      "been initialized. You may need to confirm "
-                      "if you put exe.run(startup_program) "
-                      "after optimizer.minimize function.");
-    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
-                      "Learning rate should have 1 dimension");
+    PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
+                      platform::errors::InvalidArgument(
+                          "Maybe the Input variable LearningRate has not "
+                          "been initialized. You may need to confirm "
+                          "if you put exe.run(startup_program) "
+                          "after optimizer.minimize function."));
+    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
+                      platform::errors::InvalidArgument(
+                          "Learning rate should have 1 dimension"));
     auto beta1_pow_dims = ctx->GetInputDim("Beta1Pow");
-    PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1,
-                      "Beta1 power accumulator should have 1 dimension");
+    PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1,
+                      platform::errors::InvalidArgument(
+                          "Beta1 power accumulator should have 1 dimension"));
     auto param_dims = ctx->GetInputDim("Param");
-    PADDLE_ENFORCE_EQ(
-        param_dims, ctx->GetInputDim("Grad"),
-        "Param and Grad input of AdamaxOp should have same dimension");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("Grad"),
+        platform::errors::InvalidArgument(
+            "Param and Grad input of AdamaxOp should have same dimension"));
-    PADDLE_ENFORCE_EQ(
-        param_dims, ctx->GetInputDim("Moment"),
-        "Param and Moment input of AdamaxOp should have same dimension");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("Moment"),
+        platform::errors::InvalidArgument(
+            "Param and Moment input of AdamaxOp should have same dimension"));
-    PADDLE_ENFORCE_EQ(
-        param_dims, ctx->GetInputDim("InfNorm"),
-        "Param and InfNorm input of AdamaxOp should have same dimension");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("InfNorm"),
+        platform::errors::InvalidArgument(
+            "Param and InfNorm input of AdamaxOp should have same dimension"));
     ctx->SetOutputDim("ParamOut", param_dims);
     ctx->SetOutputDim("MomentOut", param_dims);
@@ -24,17 +24,19 @@ class AdamaxOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     const auto* param_var = ctx.InputVar("Param");
-    PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
-                   "The Var(%s)'s type should be LoDTensor, "
-                   "but the received is %s",
-                   ctx.InputNames("Param").front(),
-                   framework::ToTypeName(param_var->Type()));
+    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Param").front(),
+                          framework::ToTypeName(param_var->Type())));
     const auto* grad_var = ctx.InputVar("Grad");
-    PADDLE_ENFORCE(grad_var->IsType<framework::LoDTensor>(),
-                   "The Var(%s)'s type should be LoDTensor, "
-                   "but the received is %s",
-                   ctx.InputNames("Grad").front(),
-                   framework::ToTypeName(grad_var->Type()));
+    PADDLE_ENFORCE_EQ(grad_var->IsType<framework::LoDTensor>(), true,
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Grad").front(),
+                          framework::ToTypeName(grad_var->Type())));
     auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
     auto moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut");
@@ -24,46 +24,50 @@ class FTRLOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Param"),
-                   "Input(Param) of FTRL should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("SquaredAccumulator"),
-                   "Input(SquaredAccumulator) of FTRL should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("LinearAccumulator"),
-                   "Input(LinearAccumulator) of FTRL should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Grad"),
-                   "Input(Grad) of FTRL should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
-                   "Input(LearningRate) of FTRL should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "FTRL");
+    OP_INOUT_CHECK(ctx->HasInput("SquaredAccumulator"), "Input",
+                   "SquaredAccumulator", "FTRL");
+    OP_INOUT_CHECK(ctx->HasInput("LinearAccumulator"), "Input",
+                   "LinearAccumulator", "FTRL");
+    OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "FTRL");
+    OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate",
+                   "FTRL");
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Param").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s",
-        ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front());
+    PADDLE_ENFORCE_EQ(
+        ctx->GetInputsVarType("Param").front(),
+        framework::proto::VarType::LOD_TENSOR,
+        platform::errors::InvalidArgument(
+            "The input var's type should be LoDTensor, but the received is %s",
+            ctx->Inputs("Param").front(),
+            ctx->GetInputsVarType("Param").front()));
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Grad").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s",
-        ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front());
+    PADDLE_ENFORCE_EQ(
+        ctx->GetInputsVarType("Grad").front(),
+        framework::proto::VarType::LOD_TENSOR,
+        platform::errors::InvalidArgument(
+            "The input var's type should be LoDTensor, but the received is %s",
+            ctx->Inputs("Grad").front(),
+            ctx->GetInputsVarType("Grad").front()));
-    PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
-                   "Output(ParamOut) of FTRL should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("SquaredAccumOut"),
-                   "Output(SquaredAccumOut) of FTRL should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("LinearAccumOut"),
-                   "Output(LinearAccumOut) of FTRL should not be null.");
+    OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "FTRL");
+    OP_INOUT_CHECK(ctx->HasOutput("SquaredAccumOut"), "Output",
+                   "SquaredAccumOut", "FTRL");
+    OP_INOUT_CHECK(ctx->HasOutput("LinearAccumOut"), "Output", "LinearAccumOut",
+                   "FTRL");
     auto param_dim = ctx->GetInputDim("Param");
-    PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"),
-                      "Two input of FTRL Op's dimension must be same.");
+    PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"),
+                      platform::errors::InvalidArgument(
+                          "Two input of FTRL Op's dimension must be same."));
     auto lr_dim = ctx->GetInputDim("LearningRate");
-    PADDLE_ENFORCE_NE(framework::product(lr_dim), 0,
-                      "Maybe the Input variable LearningRate has not "
-                      "been initialized. You may need to confirm "
-                      "if you put exe.run(startup_program) "
-                      "after optimizer.minimize function.");
-    PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1,
-                      "Learning Rate should be a scalar.");
+    PADDLE_ENFORCE_NE(framework::product(lr_dim), 0,
+                      platform::errors::InvalidArgument(
+                          "Maybe the Input variable LearningRate has not "
+                          "been initialized. You may need to confirm "
+                          "if you put exe.run(startup_program) "
+                          "after optimizer.minimize function."));
+    PADDLE_ENFORCE_EQ(
+        framework::product(lr_dim), 1,
+        platform::errors::InvalidArgument("Learning Rate should be a scalar."));
     ctx->SetOutputDim("ParamOut", param_dim);
     ctx->SetOutputDim("SquaredAccumOut", param_dim);
@@ -29,17 +29,19 @@ class FTRLOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     const auto* param_var = ctx.InputVar("Param");
-    PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
-                   "The Var(%s)'s type should be LoDTensor, "
-                   "but the received is %s",
-                   ctx.InputNames("Param").front(),
-                   framework::ToTypeName(param_var->Type()));
+    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Param").front(),
+                          framework::ToTypeName(param_var->Type())));
     const auto* grad_var = ctx.InputVar("Grad");
-    PADDLE_ENFORCE(grad_var->IsType<framework::LoDTensor>(),
-                   "The Var(%s)'s type should be LoDTensor, "
-                   "but the received is %s",
-                   ctx.InputNames("Grad").front(),
-                   framework::ToTypeName(grad_var->Type()));
+    PADDLE_ENFORCE_EQ(grad_var->IsType<framework::LoDTensor>(), true,
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Grad").front(),
+                          framework::ToTypeName(grad_var->Type())));
     auto* param_out = ctx.Output<Tensor>("ParamOut");
     auto* sq_accum_out = ctx.Output<Tensor>("SquaredAccumOut");
@@ -183,6 +183,8 @@ def bpr_loss(input, label, name=None):
     """
     helper = LayerHelper('bpr_loss', **locals())
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'bpr_loss')
     helper.append_op(
         type='bpr_loss',
         inputs={'X': [input],
@@ -13490,6 +13490,8 @@ def continuous_value_model(input, cvm, use_cvm=True):
     """
     helper = LayerHelper('cvm', **locals())
     out = helper.create_variable(dtype=input.dtype)
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'cvm')
     helper.append_op(
         type='cvm',
         inputs={'X': [input],