Unverified commit 621a4085, authored by zhang wenhui, committed by GitHub

enhance cvm bpr_loss adam adagrad adamax ftrl error message, test=develop (#24452)

Parent e5861929
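
The diff below applies one pattern across the cvm, bpr_loss, adam, adagrad, adamax, and ftrl operators: input/output existence checks move from bare-string PADDLE_ENFORCE calls to OP_INOUT_CHECK, and value/shape checks gain a typed platform::errors::InvalidArgument message. The following minimal sketch is illustrative only and is not part of the commit; the operator name "MyOp", the InferShape body, and the example message are made up for demonstration, while the macros and error builder are the ones used throughout the diff.

// Illustrative sketch of the error-message pattern used in this commit;
// "MyOp" and the concrete checks are hypothetical.
void InferShape(framework::InferShapeContext* ctx) const override {
  // Before: PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
  // After: OP_INOUT_CHECK reports the variable name, its role, and the op type.
  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "MyOp");
  OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "MyOp");

  // Before: PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2.");
  // After: the comparison carries a typed error, so a failure is reported as an
  // InvalidArgument with a formatted message rather than a bare string.
  auto x_dims = ctx->GetInputDim("X");
  PADDLE_ENFORCE_EQ(
      x_dims.size(), 2,
      platform::errors::InvalidArgument(
          "Input(X)'s rank should be 2, but received %d.", x_dims.size()));
}
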
@@ -23,22 +23,26 @@ class BprLossOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BprLoss");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "BprLoss");
OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "BprLoss");
auto x_dims = ctx->GetInputDim("X");
auto label_dims = ctx->GetInputDim("Label");
int rank = x_dims.size();
PADDLE_ENFORCE_EQ(rank, label_dims.size(),
"Input(X) and Input(Label) shall have the same rank.");
PADDLE_ENFORCE_EQ(
rank, label_dims.size(),
platform::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same rank."));
if (ctx->IsRuntime() || (framework::product(x_dims) > 0 &&
framework::product(label_dims) > 0)) {
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
PADDLE_ENFORCE_EQ(
framework::slice_ddim(x_dims, 0, rank - 1),
framework::slice_ddim(label_dims, 0, rank - 1),
platform::errors::InvalidArgument(
"Input(X) and Input(Label) shall have the same shape "
"except the last dimension.");
"except the last dimension."));
}
auto y_dims = x_dims;
@@ -63,33 +67,41 @@ class BprLossGradientOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
"Input(Y@GRAD) shoudl be not null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"Output(X@GRAD) should be not null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BprLossGradient");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "BprLossGradient");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
framework::GradVarName("Y"), "BprLossGradient");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
framework::GradVarName("X"), "BprLossGradient");
auto x_dims = ctx->GetInputDim("X");
auto label_dims = ctx->GetInputDim("Label");
auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
int rank = x_dims.size();
PADDLE_ENFORCE_EQ(dy_dims.size(), rank,
"Input(Y@Grad) and Input(X) should have the same rank.");
PADDLE_ENFORCE_EQ(label_dims.size(), rank,
"Input(Label) and Input(X) should have the same rank.");
PADDLE_ENFORCE_EQ(
dy_dims.size(), rank,
platform::errors::InvalidArgument(
"Input(Y@Grad) and Input(X) should have the same rank."));
PADDLE_ENFORCE_EQ(
label_dims.size(), rank,
platform::errors::InvalidArgument(
"Input(Label) and Input(X) should have the same rank."));
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
framework::slice_ddim(label_dims, 0, rank - 1),
platform::errors::InvalidArgument(
"The Input(X) and Input(Label) should have the same "
"shape except the last dimension.");
"shape except the last dimension."));
PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
framework::slice_ddim(dy_dims, 0, rank - 1),
platform::errors::InvalidArgument(
"The Input(X) and Input(Y@Grad) should have the same "
"shape except the last dimension.");
"shape except the last dimension."));
PADDLE_ENFORCE_EQ(dy_dims[rank - 1], 1,
"The last dimension of Input(Y@Grad) should be 1.");
platform::errors::InvalidArgument(
"The last dimension of Input(Y@Grad) should be 1."));
PADDLE_ENFORCE_EQ(label_dims[rank - 1], 1,
" the last dimension of Input(Label) should be 1.");
platform::errors::InvalidArgument(
" the last dimension of Input(Label) should be 1."));
ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
ctx->ShareLoD("X", framework::GradVarName("X"));
}
......
@@ -28,7 +28,6 @@ using Tensor = framework::Tensor;
template <typename T>
struct TolerableValue {
HOSTDEVICE T operator()(const T& x) const {
PADDLE_ENFORCE_EQ(std::is_floating_point<T>::value, true);
const T kApproInf = 1e20;
if (x == INFINITY) return kApproInf;
if (x == -INFINITY) return -kApproInf;
@@ -62,8 +61,11 @@ class BprLossOpKernel : public framework::OpKernel<T> {
const int64_t* label_data = labels->data<int64_t>();
for (int i = 0; i < step_size; ++i) {
int lbl_pos = label_data[i];
PADDLE_ENFORCE_GE(lbl_pos, 0);
PADDLE_ENFORCE_LT(lbl_pos, class_num);
PADDLE_ENFORCE_GE(lbl_pos, 0, platform::errors::InvalidArgument(
"label data %d is illegal.", lbl_pos));
PADDLE_ENFORCE_LT(lbl_pos, class_num,
platform::errors::InvalidArgument(
"label data %d is illegal.", lbl_pos));
int index_pos = i * class_num + lbl_pos;
T sum = static_cast<T>(0);
for (int j = 0; j < class_num; j++) {
......
@@ -26,17 +26,20 @@ class CVMOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
PADDLE_ENFORCE(ctx->HasInput("CVM"), "Input(CVM) should be not null.");
PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CVM");
OP_INOUT_CHECK(ctx->HasInput("CVM"), "Input", "CVM", "CVM");
OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "CVM");
auto x_dims = ctx->GetInputDim("X");
auto cvm_dims = ctx->GetInputDim("CVM");
PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input(X)'s rank should be 2.");
PADDLE_ENFORCE_EQ(cvm_dims.size(), 2UL, "Input(CVM)'s rank should be 2.");
PADDLE_ENFORCE_EQ(cvm_dims[1], 2UL,
PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, platform::errors::InvalidArgument(
"Input(X)'s rank should be 2."));
PADDLE_ENFORCE_EQ(
cvm_dims.size(), 2UL,
platform::errors::InvalidArgument("Input(CVM)'s rank should be 2."));
PADDLE_ENFORCE_EQ(cvm_dims[1], 2UL, platform::errors::InvalidArgument(
"The 2nd dimension of "
"Input(CVM) should be 2.");
"Input(CVM) should be 2."));
if (ctx->Attrs().Get<bool>("use_cvm")) {
ctx->SetOutputDim("Y", {x_dims[0], x_dims[1]});
@@ -63,27 +66,36 @@ class CVMGradientOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
PADDLE_ENFORCE(ctx->HasInput("CVM"), "Input(CVM) should be not null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
"Input(Y@GRAD) should be not null.");
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
"Output(X@GRAD) should be not null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CVMGradient");
OP_INOUT_CHECK(ctx->HasInput("CVM"), "Input", "CVM", "CVMGradient");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
framework::GradVarName("Y"), "CVMGradient");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
framework::GradVarName("X"), "CVMGradient");
auto x_dims = ctx->GetInputDim("X");
auto cvm_dims = ctx->GetInputDim("CVM");
auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2.");
PADDLE_ENFORCE_EQ(dy_dims.size(), 2, "Input(Y@Grad)'s rank should be 2.");
PADDLE_ENFORCE_EQ(cvm_dims.size(), 2, "Input(CVM)'s rank should be 2.");
PADDLE_ENFORCE_EQ(x_dims[0], dy_dims[0],
PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
"Input(X)'s rank should be 2."));
PADDLE_ENFORCE_EQ(
dy_dims.size(), 2,
platform::errors::InvalidArgument("Input(Y@Grad)'s rank should be 2."));
PADDLE_ENFORCE_EQ(
cvm_dims.size(), 2,
platform::errors::InvalidArgument("Input(CVM)'s rank should be 2."));
PADDLE_ENFORCE_EQ(
x_dims[0], dy_dims[0],
platform::errors::InvalidArgument(
"The 1st dimension of Input(X) and Input(Y@Grad) should "
"be equal.");
"be equal."));
PADDLE_ENFORCE_EQ(cvm_dims[1], 2,
PADDLE_ENFORCE_EQ(
cvm_dims[1], 2,
platform::errors::InvalidArgument(
"When Attr(soft_label) == false, the 2nd dimension of "
"Input(CVM) should be 2.");
"Input(CVM) should be 2."));
ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
ctx->ShareLoD("X", framework::GradVarName("X"));
}
......
@@ -29,35 +29,34 @@ class AdagradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Param"),
"Input(Param) of AdagradOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Grad"),
"Input(Grad) of AdagradOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Moment"),
"Input(Moment) of AdagradOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
"Input(LearningRate) of AdagradOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
"Output(ParamOut) of AdagradOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("MomentOut"),
"Output(MomentOut) of AdagradOp should not be null.");
OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "Adagrad");
OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "Adagrad");
OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment", "Adagrad");
OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate",
"Adagrad");
OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "Adagrad");
OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut",
"Adagrad");
auto lr_dims = ctx->GetInputDim("LearningRate");
PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
platform::errors::InvalidArgument(
"Maybe the Input variable LearningRate has not "
"been initialized. You may need to confirm "
"if you put exe.run(startup_program) "
"after optimizer.minimize function.");
"after optimizer.minimize function."));
PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
"LearningRate should have one element");
platform::errors::InvalidArgument(
"LearningRate should have one element"));
auto param_dims = ctx->GetInputDim("Param");
PADDLE_ENFORCE_EQ(
param_dims, ctx->GetInputDim("Grad"),
"Param and Grad input of AdagradOp should have the same dimension.");
platform::errors::InvalidArgument("Param and Grad input of AdagradOp "
"should have the same dimension."));
PADDLE_ENFORCE_EQ(
param_dims, ctx->GetInputDim("Moment"),
"Param and Moment input of AdagradOp should have the same dimension.");
platform::errors::InvalidArgument("Param and Moment input of AdagradOp "
"should have the same dimension."));
ctx->SetOutputDim("ParamOut", param_dims);
ctx->SetOutputDim("MomentOut", param_dims);
......
@@ -47,11 +47,12 @@ class AdagradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *param_var = ctx.InputVar("Param");
PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
framework::ToTypeName(param_var->Type()));
framework::ToTypeName(param_var->Type())));
auto *param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
auto *moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut");
@@ -89,10 +90,14 @@ class AdagradOpKernel : public framework::OpKernel<T> {
}
} else if (grad_var->IsType<framework::SelectedRows>()) {
auto *param_tensor = ctx.Input<framework::Tensor>("Param");
PADDLE_ENFORCE_EQ(param_tensor, param_out_tensor);
PADDLE_ENFORCE_EQ(param_tensor, param_out_tensor,
platform::errors::InvalidArgument(
"the input tensor not euqal with output tensor"));
auto *moment_tensor = ctx.Input<framework::Tensor>("Moment");
PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor);
PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor,
platform::errors::InvalidArgument(
"the input moment not eual with output moment"));
SparseAdagradFunctor<DeviceContext, T> functor;
functor(ctx.template device_context<DeviceContext>(),
@@ -100,7 +105,8 @@ class AdagradOpKernel : public framework::OpKernel<T> {
*ctx.Input<framework::Tensor>("LearningRate"), epsilon,
moment_out_tensor, param_out_tensor);
} else {
PADDLE_THROW("Unsupported Variable Type of Grad");
PADDLE_THROW(platform::errors::InvalidArgument(
"Unsupported Variable Type of Grad"));
}
}
};
......
@@ -376,11 +376,12 @@ class AdamOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* param_var = ctx.InputVar("Param");
PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
framework::ToTypeName(param_var->Type()));
framework::ToTypeName(param_var->Type())));
using paddle::framework::LoDTensor;
@@ -572,7 +573,8 @@ class AdamOpKernel : public framework::OpKernel<T> {
functor(param->numel());
}
} else {
PADDLE_THROW("Variable type not supported by adam_op");
PADDLE_THROW(platform::errors::InvalidArgument(
"Variable type not supported by adam_op"));
}
}
};
......
@@ -23,57 +23,61 @@ class AdamaxOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Param"),
"Input(Param) of AdamaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Grad"),
"Input(Grad) of AdamaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Moment"),
"Input(Moment) of AdamaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("InfNorm"),
"Input(InfNorm) of AdamaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
"Input(LearningRate) of AdamaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Beta1Pow"),
"Input(Beta1Pow) of AdamaxOp should not be null.");
PADDLE_ENFORCE(
ctx->GetInputsVarType("Param").front() ==
OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "Adamax");
OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "Adamax");
OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment", "Adamax");
OP_INOUT_CHECK(ctx->HasInput("InfNorm"), "Input", "InfNorm", "Adamax");
OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate",
"Adamax");
OP_INOUT_CHECK(ctx->HasInput("Beta1Pow"), "Input", "Beta1Pow", "Adamax");
PADDLE_ENFORCE_EQ(
ctx->GetInputsVarType("Param").front(),
framework::proto::VarType::LOD_TENSOR,
platform::errors::InvalidArgument(
"The input var's type should be LoDTensor, but the received is %s",
ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front());
PADDLE_ENFORCE(
ctx->GetInputsVarType("Grad").front() ==
ctx->Inputs("Param").front(),
ctx->GetInputsVarType("Param").front()));
PADDLE_ENFORCE_EQ(
ctx->GetInputsVarType("Grad").front(),
framework::proto::VarType::LOD_TENSOR,
platform::errors::InvalidArgument(
"The input var's type should be LoDTensor, but the received is %s",
ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front());
ctx->Inputs("Grad").front(),
ctx->GetInputsVarType("Grad").front()));
PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
"Output(ParamOut) of AdamaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("MomentOut"),
"Output(MomentOut) of AdamaxOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"),
"Output(InfNormOut) of AdamaxOp should not be null.");
OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "Adamax");
OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut",
"Adamax");
OP_INOUT_CHECK(ctx->HasOutput("InfNormOut"), "Output", "InfNormOut",
"Adamax");
auto lr_dims = ctx->GetInputDim("LearningRate");
PADDLE_ENFORCE_NE(framework::product(lr_dims), 0,
platform::errors::InvalidArgument(
"Maybe the Input variable LearningRate has not "
"been initialized. You may need to confirm "
"if you put exe.run(startup_program) "
"after optimizer.minimize function.");
"after optimizer.minimize function."));
PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
"Learning rate should have 1 dimension");
platform::errors::InvalidArgument(
"Learning rate should have 1 dimension"));
auto beta1_pow_dims = ctx->GetInputDim("Beta1Pow");
PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1,
"Beta1 power accumulator should have 1 dimension");
platform::errors::InvalidArgument(
"Beta1 power accumulator should have 1 dimension"));
auto param_dims = ctx->GetInputDim("Param");
PADDLE_ENFORCE_EQ(
param_dims, ctx->GetInputDim("Grad"),
"Param and Grad input of AdamaxOp should have same dimension");
platform::errors::InvalidArgument(
"Param and Grad input of AdamaxOp should have same dimension"));
PADDLE_ENFORCE_EQ(
param_dims, ctx->GetInputDim("Moment"),
"Param and Moment input of AdamaxOp should have same dimension");
platform::errors::InvalidArgument(
"Param and Moment input of AdamaxOp should have same dimension"));
PADDLE_ENFORCE_EQ(
param_dims, ctx->GetInputDim("InfNorm"),
"Param and InfNorm input of AdamaxOp should have same dimension");
platform::errors::InvalidArgument(
"Param and InfNorm input of AdamaxOp should have same dimension"));
ctx->SetOutputDim("ParamOut", param_dims);
ctx->SetOutputDim("MomentOut", param_dims);
......
@@ -24,17 +24,19 @@ class AdamaxOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* param_var = ctx.InputVar("Param");
PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
framework::ToTypeName(param_var->Type()));
framework::ToTypeName(param_var->Type())));
const auto* grad_var = ctx.InputVar("Grad");
PADDLE_ENFORCE(grad_var->IsType<framework::LoDTensor>(),
PADDLE_ENFORCE_EQ(grad_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Grad").front(),
framework::ToTypeName(grad_var->Type()));
framework::ToTypeName(grad_var->Type())));
auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
auto moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut");
......
@@ -24,46 +24,50 @@ class FTRLOp : public framework::OperatorWithKernel {
protected:
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Param"),
"Input(Param) of FTRL should not be null.");
PADDLE_ENFORCE(ctx->HasInput("SquaredAccumulator"),
"Input(SquaredAccumulator) of FTRL should not be null.");
PADDLE_ENFORCE(ctx->HasInput("LinearAccumulator"),
"Input(LinearAccumulator) of FTRL should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Grad"),
"Input(Grad) of FTRL should not be null.");
PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
"Input(LearningRate) of FTRL should not be null.");
PADDLE_ENFORCE(
ctx->GetInputsVarType("Param").front() ==
OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "FTRL");
OP_INOUT_CHECK(ctx->HasInput("SquaredAccumulator"), "Input",
"SquaredAccumulator", "FTRL");
OP_INOUT_CHECK(ctx->HasInput("LinearAccumulator"), "Input",
"LinearAccumulator", "FTRL");
OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "FTRL");
OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate",
"FTRL");
PADDLE_ENFORCE_EQ(
ctx->GetInputsVarType("Param").front(),
framework::proto::VarType::LOD_TENSOR,
platform::errors::InvalidArgument(
"The input var's type should be LoDTensor, but the received is %s",
ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front());
PADDLE_ENFORCE(
ctx->GetInputsVarType("Grad").front() ==
ctx->Inputs("Param").front(),
ctx->GetInputsVarType("Param").front()));
PADDLE_ENFORCE_EQ(
ctx->GetInputsVarType("Grad").front(),
framework::proto::VarType::LOD_TENSOR,
platform::errors::InvalidArgument(
"The input var's type should be LoDTensor, but the received is %s",
ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front());
ctx->Inputs("Grad").front(),
ctx->GetInputsVarType("Grad").front()));
PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
"Output(ParamOut) of FTRL should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("SquaredAccumOut"),
"Output(SquaredAccumOut) of FTRL should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("LinearAccumOut"),
"Output(LinearAccumOut) of FTRL should not be null.");
OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "FTRL");
OP_INOUT_CHECK(ctx->HasOutput("SquaredAccumOut"), "Output",
"SquaredAccumOut", "FTRL");
OP_INOUT_CHECK(ctx->HasOutput("LinearAccumOut"), "Output", "LinearAccumOut",
"FTRL");
auto param_dim = ctx->GetInputDim("Param");
PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"),
"Two input of FTRL Op's dimension must be same.");
platform::errors::InvalidArgument(
"Two input of FTRL Op's dimension must be same."));
auto lr_dim = ctx->GetInputDim("LearningRate");
PADDLE_ENFORCE_NE(framework::product(lr_dim), 0,
platform::errors::InvalidArgument(
"Maybe the Input variable LearningRate has not "
"been initialized. You may need to confirm "
"if you put exe.run(startup_program) "
"after optimizer.minimize function.");
PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1,
"Learning Rate should be a scalar.");
"after optimizer.minimize function."));
PADDLE_ENFORCE_EQ(
framework::product(lr_dim), 1,
platform::errors::InvalidArgument("Learning Rate should be a scalar."));
ctx->SetOutputDim("ParamOut", param_dim);
ctx->SetOutputDim("SquaredAccumOut", param_dim);
......
@@ -29,17 +29,19 @@ class FTRLOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* param_var = ctx.InputVar("Param");
PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Param").front(),
framework::ToTypeName(param_var->Type()));
framework::ToTypeName(param_var->Type())));
const auto* grad_var = ctx.InputVar("Grad");
PADDLE_ENFORCE(grad_var->IsType<framework::LoDTensor>(),
PADDLE_ENFORCE_EQ(grad_var->IsType<framework::LoDTensor>(), true,
platform::errors::InvalidArgument(
"The Var(%s)'s type should be LoDTensor, "
"but the received is %s",
ctx.InputNames("Grad").front(),
framework::ToTypeName(grad_var->Type()));
framework::ToTypeName(grad_var->Type())));
auto* param_out = ctx.Output<Tensor>("ParamOut");
auto* sq_accum_out = ctx.Output<Tensor>("SquaredAccumOut");
......
@@ -183,6 +183,8 @@ def bpr_loss(input, label, name=None):
"""
helper = LayerHelper('bpr_loss', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'bpr_loss')
helper.append_op(
type='bpr_loss',
inputs={'X': [input],
......
@@ -13490,6 +13490,8 @@ def continuous_value_model(input, cvm, use_cvm=True):
"""
helper = LayerHelper('cvm', **locals())
out = helper.create_variable(dtype=input.dtype)
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'cvm')
helper.append_op(
type='cvm',
inputs={'X': [input],
......