operator_test.cc 22.8 KB
Newer Older
1
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Q
Qiao Longfei 已提交
2 3 4 5 6 7 8 9 10 11 12 13

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
14
#include "paddle/fluid/framework/operator.h"
D
dzhwinter 已提交
15

16
#include "gtest/gtest.h"
Y
Yi Wang 已提交
17 18
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_registry.h"
19
#include "paddle/fluid/platform/errors.h"
20
#include "paddle/fluid/platform/init.h"
Q
Qiao Longfei 已提交
21

22 23
DECLARE_bool(enable_unused_var_check);

Q
Qiao Longfei 已提交
24 25 26
namespace paddle {
namespace framework {

Q
Qiao Longfei 已提交
27 28 29
static int op_run_num = 0;

class OpWithoutKernelTest : public OperatorBase {
Q
Qiao Longfei 已提交
30
 public:
31 32 33 34
  OpWithoutKernelTest(const std::string& type,
                      const VariableNameMap& inputs,
                      const VariableNameMap& outputs,
                      const AttributeMap& attrs)
Y
Yu Yang 已提交
35
      : OperatorBase(type, inputs, outputs, attrs), x(1) {}
36 37 38 39

 private:
  void RunImpl(const Scope& scope,
               const platform::Place& place) const override {
Y
Yu Yang 已提交
40 41 42 43
    ++op_run_num;
    ASSERT_EQ(static_cast<int>(inputs_.size()), 1);
    ASSERT_EQ(static_cast<int>(outputs_.size()), 1);
    ASSERT_EQ(scope.FindVar(inputs_.at("input")[0]), nullptr);
Q
Qiao Longfei 已提交
44
    ASSERT_EQ(x, 1);
Y
Yu Yang 已提交
45
    ASSERT_NE(scope.FindVar(outputs_.at("output")[0]), nullptr);
Q
Qiao Longfei 已提交
46
  }
Q
Qiao Longfei 已提交
47 48

 public:
Y
Yu Yang 已提交
49
  int x{0};
Q
Qiao Longfei 已提交
50 51
};

D
dzhwinter 已提交
52
class OpWithoutKernelCheckerMaker : public OpProtoAndCheckerMaker {
Q
Qiao Longfei 已提交
53
 public:
Y
Yu Yang 已提交
54
  void Make() {
Q
Qiao Longfei 已提交
55 56
    AddInput("input", "input of test op");
    AddOutput("output", "output of test op");
Q
Qiao Longfei 已提交
57
    AddAttr<float>("scale", "scale of cosine op");
X
Xin Pan 已提交
58 59
    AddAttr<int>("kernel_sub_type", "kernels with different implementations.")
        .SetDefault(0);
Q
Qiao Longfei 已提交
60 61 62 63 64 65 66
    AddComment("This is test op");
  }
};

}  // namespace framework
}  // namespace paddle

Y
Yu Yang 已提交
67 68
// Fills a proto OpDesc::Var: sets the parameter (slot) name and appends
// every argument (variable) name to the var's argument list.
static void BuildVar(const std::string& param_name,
                     std::initializer_list<const char*> arguments,
                     paddle::framework::proto::OpDesc::Var* var) {
  var->set_parameter(param_name);
  for (auto& arg_name : arguments) {
    *var->mutable_arguments()->Add() = arg_name;
  }
}

D
dzhwinter 已提交
76 77 78
REGISTER_OP_WITHOUT_GRADIENT(test_operator,
                             paddle::framework::OpWithoutKernelTest,
                             paddle::framework::OpWithoutKernelCheckerMaker);
Q
Qiao Longfei 已提交
79 80

// Builds a test_operator from a proto OpDesc, runs it once on CPU, and
// checks the kernel-less RunImpl executed exactly once.
TEST(OperatorBase, all) {
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("test_operator");
  BuildVar("input", {"IN1"}, op_desc.add_inputs());
  BuildVar("output", {"OUT1"}, op_desc.add_outputs());

  auto attr = op_desc.mutable_attrs()->Add();
  attr->set_name("scale");
  attr->set_type(paddle::framework::proto::AttrType::FLOAT);
  attr->set_f(3.14);

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  // Only the output variable is created; RunImpl asserts the input is absent.
  scope.Var("OUT1");
  ASSERT_EQ(paddle::framework::op_run_num, 0);
  op->Run(scope, cpu_place);
  ASSERT_EQ(paddle::framework::op_run_num, 1);
}

namespace paddle {
namespace framework {

X
Xin Pan 已提交
105 106
static int special_type_value = 1;

Q
Qiao Longfei 已提交
107 108
class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
 public:
Y
Yu Yang 已提交
109
  void Make() {
Y
Yan Chunwei 已提交
110 111 112 113
    AddInput("x", "input of test op");
    AddOutput("y", "output of test op");
    AddAttr<float>("scale", "scale of cosine op")
        .SetDefault(1.0)
F
fengjiayi 已提交
114
        .GreaterThan(0.0);
X
Xin Pan 已提交
115 116
    AddAttr<int>("kernel_sub_type", "kernels with different implementations.")
        .SetDefault(0);
Q
Qiao Longfei 已提交
117 118 119 120
    AddComment("This is test op");
  }
};

Q
Qiao Longfei 已提交
121
static int cpu_kernel_run_num = 0;
X
Xin Pan 已提交
122
static int cpu_kernel2_run_num = 0;
Q
Qiao Longfei 已提交
123

Q
Qiao Longfei 已提交
124
class OpWithKernelTest : public OperatorWithKernel {
Y
Yu Yang 已提交
125 126 127
 public:
  using OperatorWithKernel::OperatorWithKernel;

Y
Yu Yang 已提交
128
 protected:
129
  void InferShape(framework::InferShapeContext* ctx) const override {}
130 131
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
X
Xin Pan 已提交
132
    int sub_type = ctx.Attr<int>("kernel_sub_type");
133 134
    return OpKernelType(proto::VarType::FP32,
                        ctx.GetPlace(),
135
                        phi::DataLayout::kAnyLayout,
136 137
                        framework::LibraryType::kPlain,
                        sub_type);
Y
Yu Yang 已提交
138
  }
Q
Qiao Longfei 已提交
139 140
};

141
template <typename T1, typename T2>
Y
Yu Yang 已提交
142
class CPUKernelTest : public OpKernel<float> {
Q
Qiao Longfei 已提交
143
 public:
144
  void Compute(const ExecutionContext& ctx) const {
H
hong 已提交
145
    std::cout << ctx.DebugString() << std::endl;
Q
Qiao Longfei 已提交
146
    cpu_kernel_run_num++;
H
hong 已提交
147 148
    ASSERT_EQ(ctx.InputName("x"), "IN1");
    ASSERT_EQ(ctx.OutputName("y"), "OUT1");
149
    auto* x = ctx.Input<phi::DenseTensor>("X");
150
    ASSERT_EQ(x, nullptr);
Y
Yan Chunwei 已提交
151 152 153
  }
};

X
Xin Pan 已提交
154 155 156 157
// Alternative CPU kernel, registered with the customized type
// special_type_value (selected when kernel_sub_type == 1).
template <typename T1, typename T2>
class CPUKernel2Test : public OpKernel<float> {
 public:
  void Compute(const ExecutionContext& ctx) const {
    std::cout << ctx.DebugString() << std::endl;
    cpu_kernel2_run_num++;
    ASSERT_EQ(ctx.InputName("x"), "IN1");
    ASSERT_EQ(ctx.OutputName("y"), "OUT1");
  }
};

Y
Yan Chunwei 已提交
165 166 167
class OpKernelTestMultiInputsProtoAndCheckerMaker
    : public OpProtoAndCheckerMaker {
 public:
Y
Yu Yang 已提交
168
  void Make() {
Y
Yu Yang 已提交
169
    AddInput("xs", "inputs of test op").AsDuplicable();
Y
Yan Chunwei 已提交
170
    AddInput("k", "input of test op");
Y
Yu Yang 已提交
171
    AddOutput("ys", "outputs of test op").AsDuplicable();
Y
Yan Chunwei 已提交
172 173
    AddAttr<float>("scale", "scale of cosine op")
        .SetDefault(1.0)
F
fengjiayi 已提交
174
        .GreaterThan(0.0);
X
Xin Pan 已提交
175 176
    AddAttr<int>("kernel_sub_type", "kernels with different implementations.")
        .SetDefault(0);
Y
Yan Chunwei 已提交
177 178 179 180
    AddComment("This is test op");
  }
};

Y
Yu Yang 已提交
181
// CPU kernel validating the ExecutionContext accessors for duplicable
// (multi) inputs/outputs against the names wired up by the multi_inputs test.
// NOTE(review): "Kernal" is a historical typo, kept because the class is
// bound in a registration macro elsewhere in this file.
class CPUKernalMultiInputsTest : public OpKernel<float> {
 public:
  void Compute(const ExecutionContext& ctx) const {
    auto xs = ctx.InputNames("xs");
    ASSERT_EQ(xs.size(), 3UL);
    ASSERT_EQ(xs[0], "x0");
    ASSERT_EQ(xs[1], "x1");
    ASSERT_EQ(xs[2], "x2");

    auto inVar0 = ctx.MultiInputVar("xs");
    ASSERT_EQ(inVar0.size(), 3U);

    auto intVar1 = ctx.InputVar("k");
    ASSERT_NE(intVar1, nullptr);

    auto outVar0 = ctx.MultiOutputVar("ys");
    ASSERT_EQ(outVar0.size(), 2U);

    auto inTensor0 = ctx.MultiInput<phi::DenseTensor>("xs");
    ASSERT_EQ(inTensor0.size(), 3U);

    auto intTensor1 = ctx.Input<phi::DenseTensor>("k");
    ASSERT_NE(intTensor1, nullptr);

    auto outTensor0 = ctx.MultiOutput<phi::DenseTensor>("ys");
    ASSERT_EQ(outTensor0.size(), 2U);

    auto k = ctx.InputName("k");
    ASSERT_EQ(k, "k0");

    auto ys = ctx.OutputNames("ys");
    ASSERT_EQ(ys.size(), 2UL);
    ASSERT_EQ(ys[0], "y0");
    ASSERT_EQ(ys[1], "y1");
  }
};

Y
Yu Yang 已提交
218 219 220
}  // namespace framework
}  // namespace paddle

F
fengjiayi 已提交
221
REGISTER_OP_WITHOUT_GRADIENT(
222 223
    op_with_kernel,
    paddle::framework::OpWithKernelTest,
F
fengjiayi 已提交
224
    paddle::framework::OpKernelTestProtoAndCheckerMaker);
X
Xin Pan 已提交
225

X
Xin Pan 已提交
226 227
REGISTER_OP_CPU_KERNEL(op_with_kernel,
                       paddle::framework::CPUKernelTest<float, float>);
X
Xin Pan 已提交
228 229

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(
230 231 232 233
    op_with_kernel,
    CPU,
    paddle::platform::CPUPlace,
    MY_SPECIAL_NAME,
X
Xin Pan 已提交
234
    paddle::framework::special_type_value,
X
Xin Pan 已提交
235
    paddle::framework::CPUKernel2Test<float, float>);
Q
Qiao Longfei 已提交
236

Y
Yan Chunwei 已提交
237
// test with single input
Q
Qiao Longfei 已提交
238
TEST(OpKernel, all) {
239
  paddle::framework::InitDevices();
240
  paddle::framework::proto::OpDesc op_desc;
Q
Qiao Longfei 已提交
241
  op_desc.set_type("op_with_kernel");
Y
Fix CI  
Yu Yang 已提交
242 243
  BuildVar("x", {"IN1"}, op_desc.add_inputs());
  BuildVar("y", {"OUT1"}, op_desc.add_outputs());
Y
Yu Yang 已提交
244

Q
Qiao Longfei 已提交
245 246
  auto attr = op_desc.mutable_attrs()->Add();
  attr->set_name("scale");
247
  attr->set_type(paddle::framework::proto::AttrType::FLOAT);
Q
Qiao Longfei 已提交
248 249
  attr->set_f(3.14);

D
dzhwinter 已提交
250
  paddle::platform::CPUPlace cpu_place;
Y
Yu Yang 已提交
251
  paddle::framework::Scope scope;
Q
Qiao Longfei 已提交
252

253
  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
Q
Qiao Longfei 已提交
254
  ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 0);
D
dzhwinter 已提交
255
  op->Run(scope, cpu_place);
X
Xin Pan 已提交
256
  // kerne_sub_type = 0, hence cpu_kernel is called, cpu_kernel2 is not called.
Q
Qiao Longfei 已提交
257
  ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1);
X
Xin Pan 已提交
258 259 260 261 262 263 264 265
  ASSERT_EQ(paddle::framework::cpu_kernel2_run_num, 0);

  attr = op_desc.mutable_attrs()->Add();
  attr->set_name("kernel_sub_type");
  attr->set_type(paddle::framework::proto::AttrType::INT);
  attr->set_i(1);
  auto op2 = paddle::framework::OpRegistry::CreateOp(op_desc);
  op2->Run(scope, cpu_place);
X
Xin Pan 已提交
266
  // kerne_sub_type = 1, hence cpu_kernel2 is called, cpu_kernel is not called.
X
Xin Pan 已提交
267 268
  ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1);
  ASSERT_EQ(paddle::framework::cpu_kernel2_run_num, 1);
Q
Qiao Longfei 已提交
269
}
Y
Yan Chunwei 已提交
270

F
fengjiayi 已提交
271
REGISTER_OP_WITHOUT_GRADIENT(
272 273
    op_multi_inputs_with_kernel,
    paddle::framework::OpWithKernelTest,
F
fengjiayi 已提交
274
    paddle::framework::OpKernelTestMultiInputsProtoAndCheckerMaker);
Y
Yan Chunwei 已提交
275 276 277 278 279
REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel,
                       paddle::framework::CPUKernalMultiInputsTest);

// test with multi inputs: wires three "xs", one "k", two "ys" variables and
// runs the op; the assertions live in CPUKernalMultiInputsTest::Compute.
TEST(OpKernel, multi_inputs) {
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;

  op_desc.set_type("op_multi_inputs_with_kernel");
  BuildVar("xs", {"x0", "x1", "x2"}, op_desc.add_inputs());
  BuildVar("k", {"k0"}, op_desc.add_inputs());
  BuildVar("ys", {"y0", "y1"}, op_desc.add_outputs());

  auto attr = op_desc.mutable_attrs()->Add();
  attr->set_name("scale");
  attr->set_type(paddle::framework::proto::AttrType::FLOAT);
  attr->set_f(3.14);

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;
  scope.Var("x0")->GetMutable<phi::DenseTensor>();
  scope.Var("x1")->GetMutable<phi::DenseTensor>();
  scope.Var("x2")->GetMutable<phi::DenseTensor>();
  scope.Var("k0")->GetMutable<phi::DenseTensor>();
  scope.Var("y0")->GetMutable<phi::DenseTensor>();
  scope.Var("y1")->GetMutable<phi::DenseTensor>();

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  op->Run(scope, cpu_place);
}
305

M
minqiyang 已提交
306
// Round-trips GradVarName / GradOriginalVarName for normal, multi-char, and
// empty variable names; GradOriginalVarName must be idempotent.
TEST(VarNameTest, all) {
  std::string var_name("X");
  std::string grad_var_name = paddle::framework::GradVarName(var_name);
  ASSERT_EQ(grad_var_name, "X@GRAD");
  std::string original_var_name =
      paddle::framework::GradOriginalVarName(grad_var_name);
  ASSERT_EQ(original_var_name, "X");
  original_var_name = paddle::framework::GradOriginalVarName(original_var_name);
  ASSERT_EQ(original_var_name, "X");

  std::string var_name_2("XYZ");
  grad_var_name = paddle::framework::GradVarName(var_name_2);
  ASSERT_EQ(grad_var_name, "XYZ@GRAD");
  original_var_name = paddle::framework::GradOriginalVarName(grad_var_name);
  ASSERT_EQ(original_var_name, "XYZ");
  original_var_name = paddle::framework::GradOriginalVarName(original_var_name);
  ASSERT_EQ(original_var_name, "XYZ");

  std::string var_name_3("");
  grad_var_name = paddle::framework::GradVarName(var_name_3);
  ASSERT_EQ(grad_var_name, "@GRAD");
  original_var_name = paddle::framework::GradOriginalVarName(grad_var_name);
  ASSERT_EQ(original_var_name, "");
  original_var_name = paddle::framework::GradOriginalVarName(original_var_name);
  ASSERT_EQ(original_var_name, "");
}
332 333 334 335 336 337 338 339 340 341 342 343

namespace paddle {
namespace framework {

// Op whose kernel type is derived from a phi::DenseTensor-typed input slot,
// exercising IndicateVarDataType for the DenseTensor case.
class IndicateLoDTensorDataTypeTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {}
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
    auto data_type =
        OperatorWithKernel::IndicateVarDataType(ctx, "phi::DenseTensor");
    return framework::OpKernelType(data_type, ctx.device_context());
  }
};
349

350 351 352
class IndicateLoDTensorDataTypeTestProtoMaker : public OpProtoAndCheckerMaker {
 public:
  void Make() {
353
    AddInput("phi::DenseTensor", "Input of phi::DenseTensor type Variable.");
T
tianshuo78520a 已提交
354
    AddComment("This Op is only for IndicateVarDataType interface test.");
355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375
  }
};

// Op whose kernel type is derived from a SelectedRows-typed input slot.
class IndicateSelectedRowsDataTypeTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {}
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
    auto data_type =
        OperatorWithKernel::IndicateVarDataType(ctx, "SelectedRows");
    return framework::OpKernelType(data_type, ctx.device_context());
  }
};
class IndicateSelectedRowsDataTypeTestProtoMaker
    : public OpProtoAndCheckerMaker {
 public:
  void Make() {
    AddInput("SelectedRows", "Input of SelectedRows type Variable.");
T
tianshuo78520a 已提交
376
    AddComment("This Op is only for IndicateVarDataType interface test.");
377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395
  }
};

// Op whose input slot holds an unsupported variable type; used to verify
// IndicateVarDataType raises an informative error (see the "other" TEST).
class IndicateOtherDataTypeTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {}
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
    auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Other");
    return framework::OpKernelType(data_type, ctx.device_context());
  }
};
class IndicateOtherDataTypeTestProtoMaker : public OpProtoAndCheckerMaker {
 public:
  void Make() {
    AddInput("Other", "Input of Other type Variable");
T
tianshuo78520a 已提交
396
    AddComment("This Op is only for IndicateVarDataType interface test.");
397 398 399 400
  }
};

template <typename DeviceContext, typename T>
401
class EmptyTestKernel : public OpKernel<T> {
402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417
 public:
  void Compute(const ExecutionContext& ctx) const {}
};

}  // namespace framework
}  // namespace paddle

REGISTER_OP_WITHOUT_GRADIENT(
    indicate_lod_tensor_data_type_test,
    paddle::framework::IndicateLoDTensorDataTypeTest,
    paddle::framework::IndicateLoDTensorDataTypeTestProtoMaker);
REGISTER_OP_WITHOUT_GRADIENT(
    indicate_selected_rows_data_type_test,
    paddle::framework::IndicateSelectedRowsDataTypeTest,
    paddle::framework::IndicateSelectedRowsDataTypeTestProtoMaker);
REGISTER_OP_WITHOUT_GRADIENT(
418 419
    indicate_other_data_type_test,
    paddle::framework::IndicateOtherDataTypeTest,
420 421
    paddle::framework::IndicateOtherDataTypeTestProtoMaker);

422 423
REGISTER_OP_CPU_KERNEL(
    indicate_lod_tensor_data_type_test,
L
Leo Chen 已提交
424
    paddle::framework::EmptyTestKernel<phi::CPUContext, int>);
425 426
REGISTER_OP_CPU_KERNEL(
    indicate_selected_rows_data_type_test,
L
Leo Chen 已提交
427
    paddle::framework::EmptyTestKernel<phi::CPUContext, int>);
428 429
REGISTER_OP_CPU_KERNEL(
    indicate_other_data_type_test,
L
Leo Chen 已提交
430
    paddle::framework::EmptyTestKernel<phi::CPUContext, int>);
431 432

// Feeds a LoDRankTable (neither DenseTensor, SelectedRows, nor
// LoDTensorArray) and expects IndicateVarDataType to raise EnforceNotMet
// with a descriptive message.
TEST(IndicateVarDataTypeTest, other) {
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("indicate_other_data_type_test");
  BuildVar("Other", {"lod_rank_table_1"}, op_desc.add_inputs());

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  auto* var = scope.Var("lod_rank_table_1");
  var->GetMutable<paddle::framework::LoDRankTable>();

  bool caught = false;
  try {
    op->Run(scope, cpu_place);
  } catch (paddle::platform::EnforceNotMet& err) {
    caught = true;
    std::string ex_msg = err.what();
    EXPECT_TRUE(ex_msg.find("The Input Variable(Other) of "
                            "(indicate_other_data_type_test) Operator used to "
                            "determine kernel data type "
                            "is empty or not phi::DenseTensor or SelectedRows "
                            "or LoDTensorArray.") != std::string::npos);
  }
  ASSERT_TRUE(caught);
}
459

H
hong 已提交
460
// Constructs an ExecutionContext directly and checks the newer accessor
// API: InputSize/OutputSize, Attrs(), and Type().
TEST(ExecutionContextAttrAndInOut, new_api) {
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("test_operator");
  BuildVar("input", {"IN1"}, op_desc.add_inputs());
  BuildVar("output", {"OUT1"}, op_desc.add_outputs());

  auto attr = op_desc.mutable_attrs()->Add();
  attr->set_name("scale");
  attr->set_type(paddle::framework::proto::AttrType::FLOAT);
  attr->set_f(3.14);

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  auto* var = scope.Var("OUT1");
  var->GetMutable<paddle::framework::LoDTensorArray>();

  paddle::platform::DeviceContextPool& pool =
      paddle::platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(cpu_place);

  paddle::framework::RuntimeContext ctx({}, {});
  paddle::framework::ExecutionContext exe_context(
      *(op.get()), scope, *dev_ctx, ctx);

  ASSERT_EQ(exe_context.InputSize("input"), 1u);
  ASSERT_EQ(exe_context.OutputSize("output"), 1u);

  auto attr_map = exe_context.Attrs();
  ASSERT_EQ(PADDLE_GET(float, attr_map["scale"]), 3.14f);
  ASSERT_EQ(exe_context.Type(), "test_operator");
}

495 496 497 498 499 500 501 502 503
namespace paddle {
namespace framework {

class GetLoDLevelTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
504 505 506 507
    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "GetLoDLevelTest");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "GetLoDLevelTest");

    auto lod_level = ctx->GetLoDLevel("X");
508 509
    PADDLE_ENFORCE_GT(lod_level,
                      0,
510 511
                      paddle::platform::errors::InvalidArgument(
                          "The LoD level Input(X) should be larger than 0."));
512 513 514 515 516 517 518 519 520
  }
};

// Op that calls InferShapeContext::SetLoDLevel in InferShape; at runtime
// this must throw (see SetGetLoDLevelTestMain).
class SetLoDLevelTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SetLoDLevelTest");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SetLoDLevelTest");
    ctx->SetLoDLevel("Out", 1);
  }
};

class GetSetLoDLevelTestMaker : public OpProtoAndCheckerMaker {
 public:
  void Make() {
530 531
    AddInput("X", "(phi::DenseTensor) Input Variable.");
    AddOutput("Out", "(phi::DenseTensor) Output Variable.");
T
tianshuo78520a 已提交
532
    AddComment("This Op is only for Get/SetLoDLevel interface test.");
533 534 535 536 537 538 539 540 541
  }
};

}  // namespace framework
}  // namespace paddle

REGISTER_OP_WITHOUT_GRADIENT(get_lod_level_test,
                             paddle::framework::GetLoDLevelTest,
                             paddle::framework::GetSetLoDLevelTestMaker);
542 543
REGISTER_OP_CPU_KERNEL(
    get_lod_level_test,
L
Leo Chen 已提交
544
    paddle::framework::EmptyTestKernel<phi::CPUContext, float>);
545 546 547 548

REGISTER_OP_WITHOUT_GRADIENT(set_lod_level_test,
                             paddle::framework::SetLoDLevelTest,
                             paddle::framework::GetSetLoDLevelTestMaker);
549 550
REGISTER_OP_CPU_KERNEL(
    set_lod_level_test,
L
Leo Chen 已提交
551
    paddle::framework::EmptyTestKernel<phi::CPUContext, float>);
552 553

// Shared driver: runs the given LoD-level op at runtime and asserts the
// "compile time only" EnforceNotMet error is raised with the right message.
void SetGetLoDLevelTestMain(std::string op_type) {
  paddle::framework::InitDevices({});
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type(op_type);
  BuildVar("X", {"x.0"}, op_desc.add_inputs());
  BuildVar("Out", {"out.0"}, op_desc.add_outputs());

  paddle::platform::CPUPlace place;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  auto* x_var = scope.Var("x.0");
  auto* x = x_var->GetMutable<phi::DenseTensor>();
  x->mutable_data<float>(phi::make_ddim({64}), place);
  auto* out_var = scope.Var("out.0");
  out_var->GetMutable<phi::DenseTensor>();

  bool caught = false;
  std::string err_str =
      (op_type == "get_lod_level_test") ? "GetLoDLevel" : "SetLoDLevel";
  err_str +=
      " is only used in compile time. The calculation of output's actual lod "
      "is different among operators so that should be set in the runtime "
      "kernel.";
  try {
    op->Run(scope, place);
  } catch (paddle::platform::EnforceNotMet& err) {
    caught = true;
    std::string ex_msg = err.what();
    EXPECT_TRUE(ex_msg.find(err_str) != std::string::npos);
  }
  ASSERT_TRUE(caught);
}

// Both tests delegate to the shared driver, which asserts the runtime error.
TEST(GetLoDLevelTest, base) { SetGetLoDLevelTestMain("get_lod_level_test"); }

TEST(SetLoDLevelTest, base) { SetGetLoDLevelTestMain("set_lod_level_test"); }
590 591 592 593 594 595 596 597 598 599 600 601

namespace paddle {
namespace framework {

// Minimal kernel-bearing op used by the unused-variable-check tests; always
// dispatches a plain FP32 kernel on the current place.
class OpUnusedVarTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {}
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
    return OpKernelType(
        proto::VarType::FP32, ctx.GetPlace(), phi::DataLayout::kAnyLayout);
  }
};

// Proto maker shared by op_with_unused_var and op_without_unused_var.
class OpUnusedVarTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
 public:
  void Make() {
    AddInput("X", "input of test op");
    AddOutput("Y", "output of test op");
    AddComment("This is test op for unused var check.");
  }
};

// Kernel that never touches its input/output tensors, so the unused-var
// check is expected to flag it when FLAGS_enable_unused_var_check is on.
template <typename T>
class OpWithUnusedVarKernelTest : public OpKernel<T> {
 public:
  void Compute(const ExecutionContext& ctx) const {
    ASSERT_EQ(ctx.InputName("X"), "X");
    ASSERT_EQ(ctx.OutputName("Y"), "Y");
  }
};

// Kernel that reads its input and output tensors, so the unused-var check
// should find nothing to report.
template <typename T>
class OpWithoutUnusedVarKernelTest : public OpKernel<T> {
 public:
  void Compute(const ExecutionContext& ctx) const {
    ASSERT_EQ(ctx.InputName("X"), "X");
    ASSERT_EQ(ctx.OutputName("Y"), "Y");
    auto* x = ctx.Input<phi::DenseTensor>("X");
    auto* y = ctx.Output<phi::DenseTensor>("Y");
    ASSERT_NE(x, y);
    ASSERT_NE(y, nullptr);
  }
};

}  // namespace framework
}  // namespace paddle

REGISTER_OP_WITHOUT_GRADIENT(
642 643
    op_with_unused_var,
    paddle::framework::OpUnusedVarTest,
644 645 646 647 648 649
    paddle::framework::OpUnusedVarTestProtoAndCheckerMaker);

REGISTER_OP_CPU_KERNEL(op_with_unused_var,
                       paddle::framework::OpWithUnusedVarKernelTest<float>);

REGISTER_OP_WITHOUT_GRADIENT(
650 651
    op_without_unused_var,
    paddle::framework::OpUnusedVarTest,
652 653 654 655 656 657 658 659 660
    paddle::framework::OpUnusedVarTestProtoAndCheckerMaker);

REGISTER_OP_CPU_KERNEL(op_without_unused_var,
                       paddle::framework::OpWithoutUnusedVarKernelTest<float>);

// test with single input: an op that ignores its variables must trigger the
// unused-variable check when the flag is enabled.
TEST(OpWithUnusedVar, all) {
  // enable the unused_var_check
  FLAGS_enable_unused_var_check = true;
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("op_with_unused_var");
  BuildVar("X", {"X"}, op_desc.add_inputs());
  BuildVar("Y", {"Y"}, op_desc.add_outputs());

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;
  auto* x = scope.Var("X")->GetMutable<phi::DenseTensor>();
  auto* y = scope.Var("Y")->GetMutable<phi::DenseTensor>();
  x->Resize({32, 64});
  y->Resize({32, 64});
  x->mutable_data<float>(cpu_place);
  y->mutable_data<float>(cpu_place);

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  // should throw exception
  ASSERT_THROW(op->Run(scope, cpu_place), paddle::platform::EnforceNotMet);
  // restore the flag so later tests are unaffected
  FLAGS_enable_unused_var_check = false;
}

// An op that uses all its variables must pass the unused-variable check.
TEST(OpWithoutUnusedVar, all) {
  // enable the unused_var_check
  FLAGS_enable_unused_var_check = true;

  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("op_without_unused_var");
  BuildVar("X", {"X"}, op_desc.add_inputs());
  BuildVar("Y", {"Y"}, op_desc.add_outputs());

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;
  auto* x = scope.Var("X")->GetMutable<phi::DenseTensor>();
  auto* y = scope.Var("Y")->GetMutable<phi::DenseTensor>();
  x->Resize({32, 64});
  y->Resize({32, 64});
  x->mutable_data<float>(cpu_place);
  y->mutable_data<float>(cpu_place);

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  // should not throw exception
  ASSERT_NO_THROW(op->Run(scope, cpu_place));
  // restore the flag so later tests are unaffected
  FLAGS_enable_unused_var_check = false;
}