/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/operator.h"
D
dzhwinter 已提交
15

16
#include "gtest/gtest.h"
Y
Yi Wang 已提交
17 18
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_registry.h"
19
#include "paddle/fluid/platform/errors.h"
20
#include "paddle/fluid/platform/init.h"
Q
Qiao Longfei 已提交
21

22 23
DECLARE_bool(enable_unused_var_check);

Q
Qiao Longfei 已提交
24 25 26
namespace paddle {
namespace framework {

Q
Qiao Longfei 已提交
27 28 29
static int op_run_num = 0;

class OpWithoutKernelTest : public OperatorBase {
Q
Qiao Longfei 已提交
30
 public:
31 32 33 34
  OpWithoutKernelTest(const std::string& type,
                      const VariableNameMap& inputs,
                      const VariableNameMap& outputs,
                      const AttributeMap& attrs)
Y
Yu Yang 已提交
35
      : OperatorBase(type, inputs, outputs, attrs), x(1) {}
36 37 38 39

 private:
  void RunImpl(const Scope& scope,
               const platform::Place& place) const override {
Y
Yu Yang 已提交
40 41 42 43
    ++op_run_num;
    ASSERT_EQ(static_cast<int>(inputs_.size()), 1);
    ASSERT_EQ(static_cast<int>(outputs_.size()), 1);
    ASSERT_EQ(scope.FindVar(inputs_.at("input")[0]), nullptr);
Q
Qiao Longfei 已提交
44
    ASSERT_EQ(x, 1);
Y
Yu Yang 已提交
45
    ASSERT_NE(scope.FindVar(outputs_.at("output")[0]), nullptr);
Q
Qiao Longfei 已提交
46
  }
Q
Qiao Longfei 已提交
47 48

 public:
Y
Yu Yang 已提交
49
  int x{0};
Q
Qiao Longfei 已提交
50 51
};

D
dzhwinter 已提交
52
class OpWithoutKernelCheckerMaker : public OpProtoAndCheckerMaker {
Q
Qiao Longfei 已提交
53
 public:
Y
Yu Yang 已提交
54
  void Make() {
Q
Qiao Longfei 已提交
55 56
    AddInput("input", "input of test op");
    AddOutput("output", "output of test op");
Q
Qiao Longfei 已提交
57
    AddAttr<float>("scale", "scale of cosine op");
X
Xin Pan 已提交
58 59
    AddAttr<int>("kernel_sub_type", "kernels with different implementations.")
        .SetDefault(0);
Q
Qiao Longfei 已提交
60 61 62 63 64 65 66
    AddComment("This is test op");
  }
};

}  // namespace framework
}  // namespace paddle

Y
Yu Yang 已提交
67 68
static void BuildVar(const std::string& param_name,
                     std::initializer_list<const char*> arguments,
69
                     paddle::framework::proto::OpDesc::Var* var) {
Y
Yu Yang 已提交
70 71 72 73 74 75
  var->set_parameter(param_name);
  for (auto& arg_name : arguments) {
    *var->mutable_arguments()->Add() = arg_name;
  }
}

D
dzhwinter 已提交
76 77 78
// Register the kernel-less operator so OpRegistry::CreateOp can instantiate
// it by the type name "test_operator".
REGISTER_OP_WITHOUT_GRADIENT(test_operator,
                             paddle::framework::OpWithoutKernelTest,
                             paddle::framework::OpWithoutKernelCheckerMaker);
Q
Qiao Longfei 已提交
79 80

TEST(OperatorBase, all) {
  paddle::framework::InitDevices();

  // Describe a "test_operator" op with one input, one output and the float
  // attribute its checker requires.
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("test_operator");
  BuildVar("input", {"IN1"}, op_desc.add_inputs());
  BuildVar("output", {"OUT1"}, op_desc.add_outputs());

  auto attr = op_desc.mutable_attrs()->Add();
  attr->set_name("scale");
  attr->set_type(paddle::framework::proto::AttrType::FLOAT);
  attr->set_f(3.14);

  paddle::framework::Scope scope;
  // Only the output variable exists; RunImpl checks the input is absent.
  scope.Var("OUT1");
  paddle::platform::CPUPlace cpu_place;

  // Running the op must bump the global run counter exactly once.
  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  ASSERT_EQ(paddle::framework::op_run_num, 0);
  op->Run(scope, cpu_place);
  ASSERT_EQ(paddle::framework::op_run_num, 1);
}

namespace paddle {
namespace framework {

X
Xin Pan 已提交
105 106
static int special_type_value = 1;

Q
Qiao Longfei 已提交
107 108
class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
 public:
Y
Yu Yang 已提交
109
  void Make() {
Y
Yan Chunwei 已提交
110 111 112 113
    AddInput("x", "input of test op");
    AddOutput("y", "output of test op");
    AddAttr<float>("scale", "scale of cosine op")
        .SetDefault(1.0)
F
fengjiayi 已提交
114
        .GreaterThan(0.0);
X
Xin Pan 已提交
115 116
    AddAttr<int>("kernel_sub_type", "kernels with different implementations.")
        .SetDefault(0);
Q
Qiao Longfei 已提交
117 118 119 120
    AddComment("This is test op");
  }
};

Q
Qiao Longfei 已提交
121
static int cpu_kernel_run_num = 0;
X
Xin Pan 已提交
122
static int cpu_kernel2_run_num = 0;
Q
Qiao Longfei 已提交
123

Q
Qiao Longfei 已提交
124
class OpWithKernelTest : public OperatorWithKernel {
Y
Yu Yang 已提交
125 126 127
 public:
  using OperatorWithKernel::OperatorWithKernel;

Y
Yu Yang 已提交
128
 protected:
129
  void InferShape(framework::InferShapeContext* ctx) const override {}
130 131
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
X
Xin Pan 已提交
132
    int sub_type = ctx.Attr<int>("kernel_sub_type");
133 134
    return OpKernelType(proto::VarType::FP32,
                        ctx.GetPlace(),
X
Xin Pan 已提交
135
                        framework::DataLayout::kAnyLayout,
136 137
                        framework::LibraryType::kPlain,
                        sub_type);
Y
Yu Yang 已提交
138
  }
Q
Qiao Longfei 已提交
139 140
};

141
template <typename T1, typename T2>
Y
Yu Yang 已提交
142
class CPUKernelTest : public OpKernel<float> {
Q
Qiao Longfei 已提交
143
 public:
144
  void Compute(const ExecutionContext& ctx) const {
H
hong 已提交
145
    std::cout << ctx.DebugString() << std::endl;
Q
Qiao Longfei 已提交
146
    cpu_kernel_run_num++;
H
hong 已提交
147 148
    ASSERT_EQ(ctx.InputName("x"), "IN1");
    ASSERT_EQ(ctx.OutputName("y"), "OUT1");
149
    auto* x = ctx.Input<phi::DenseTensor>("X");
150
    ASSERT_EQ(x, nullptr);
Y
Yan Chunwei 已提交
151 152 153
  }
};

X
Xin Pan 已提交
154 155 156 157
// Alternative CPU kernel of op_with_kernel, registered under
// special_type_value; selected when kernel_sub_type == 1.
template <typename T1, typename T2>
class CPUKernel2Test : public OpKernel<float> {
 public:
  void Compute(const ExecutionContext& ctx) const {
    std::cout << ctx.DebugString() << std::endl;
    cpu_kernel2_run_num++;
    ASSERT_EQ(ctx.InputName("x"), "IN1");
    ASSERT_EQ(ctx.OutputName("y"), "OUT1");
  }
};

Y
Yan Chunwei 已提交
165 166 167
class OpKernelTestMultiInputsProtoAndCheckerMaker
    : public OpProtoAndCheckerMaker {
 public:
Y
Yu Yang 已提交
168
  void Make() {
Y
Yu Yang 已提交
169
    AddInput("xs", "inputs of test op").AsDuplicable();
Y
Yan Chunwei 已提交
170
    AddInput("k", "input of test op");
Y
Yu Yang 已提交
171
    AddOutput("ys", "outputs of test op").AsDuplicable();
Y
Yan Chunwei 已提交
172 173
    AddAttr<float>("scale", "scale of cosine op")
        .SetDefault(1.0)
F
fengjiayi 已提交
174
        .GreaterThan(0.0);
X
Xin Pan 已提交
175 176
    AddAttr<int>("kernel_sub_type", "kernels with different implementations.")
        .SetDefault(0);
Y
Yan Chunwei 已提交
177 178 179 180
    AddComment("This is test op");
  }
};

Y
Yu Yang 已提交
181
// CPU kernel checking that duplicable input/output plumbing resolves to the
// expected variable names, Variables and tensors.
// NOTE(review): "Kernal" is a historical typo; renaming would also require
// touching the registration site, so the name is kept.
class CPUKernalMultiInputsTest : public OpKernel<float> {
 public:
  void Compute(const ExecutionContext& ctx) const {
    // Duplicable input "xs" maps to three argument names.
    auto xs = ctx.InputNames("xs");
    ASSERT_EQ(xs.size(), 3UL);
    ASSERT_EQ(xs[0], "x0");
    ASSERT_EQ(xs[1], "x1");
    ASSERT_EQ(xs[2], "x2");

    auto in_vars = ctx.MultiInputVar("xs");
    ASSERT_EQ(in_vars.size(), 3U);

    auto k_var = ctx.InputVar("k");
    ASSERT_NE(k_var, nullptr);

    auto out_vars = ctx.MultiOutputVar("ys");
    ASSERT_EQ(out_vars.size(), 2U);

    auto in_tensors = ctx.MultiInput<phi::DenseTensor>("xs");
    ASSERT_EQ(in_tensors.size(), 3U);

    auto k_tensor = ctx.Input<phi::DenseTensor>("k");
    ASSERT_NE(k_tensor, nullptr);

    auto out_tensors = ctx.MultiOutput<phi::DenseTensor>("ys");
    ASSERT_EQ(out_tensors.size(), 2U);

    auto k = ctx.InputName("k");
    ASSERT_EQ(k, "k0");

    auto ys = ctx.OutputNames("ys");
    ASSERT_EQ(ys.size(), 2UL);
    ASSERT_EQ(ys[0], "y0");
    ASSERT_EQ(ys[1], "y1");
  }
};

Y
Yu Yang 已提交
218 219 220
}  // namespace framework
}  // namespace paddle

F
fengjiayi 已提交
221
// Register op_with_kernel with its default CPU kernel, plus a second kernel
// registered under special_type_value (picked when kernel_sub_type == 1).
REGISTER_OP_WITHOUT_GRADIENT(
    op_with_kernel,
    paddle::framework::OpWithKernelTest,
    paddle::framework::OpKernelTestProtoAndCheckerMaker);

REGISTER_OP_CPU_KERNEL(op_with_kernel,
                       paddle::framework::CPUKernelTest<float, float>);

REGISTER_OP_KERNEL_WITH_CUSTOM_TYPE(
    op_with_kernel,
    CPU,
    paddle::platform::CPUPlace,
    MY_SPECIAL_NAME,
    paddle::framework::special_type_value,
    paddle::framework::CPUKernel2Test<float, float>);
Q
Qiao Longfei 已提交
236

Y
Yan Chunwei 已提交
237
// test with single input
Q
Qiao Longfei 已提交
238
TEST(OpKernel, all) {
239
  paddle::framework::InitDevices();
240
  paddle::framework::proto::OpDesc op_desc;
Q
Qiao Longfei 已提交
241
  op_desc.set_type("op_with_kernel");
Y
Fix CI  
Yu Yang 已提交
242 243
  BuildVar("x", {"IN1"}, op_desc.add_inputs());
  BuildVar("y", {"OUT1"}, op_desc.add_outputs());
Y
Yu Yang 已提交
244

Q
Qiao Longfei 已提交
245 246
  auto attr = op_desc.mutable_attrs()->Add();
  attr->set_name("scale");
247
  attr->set_type(paddle::framework::proto::AttrType::FLOAT);
Q
Qiao Longfei 已提交
248 249
  attr->set_f(3.14);

D
dzhwinter 已提交
250
  paddle::platform::CPUPlace cpu_place;
Y
Yu Yang 已提交
251
  paddle::framework::Scope scope;
Q
Qiao Longfei 已提交
252

253
  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
Q
Qiao Longfei 已提交
254
  ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 0);
D
dzhwinter 已提交
255
  op->Run(scope, cpu_place);
X
Xin Pan 已提交
256
  // kerne_sub_type = 0, hence cpu_kernel is called, cpu_kernel2 is not called.
Q
Qiao Longfei 已提交
257
  ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1);
X
Xin Pan 已提交
258 259 260 261 262 263 264 265
  ASSERT_EQ(paddle::framework::cpu_kernel2_run_num, 0);

  attr = op_desc.mutable_attrs()->Add();
  attr->set_name("kernel_sub_type");
  attr->set_type(paddle::framework::proto::AttrType::INT);
  attr->set_i(1);
  auto op2 = paddle::framework::OpRegistry::CreateOp(op_desc);
  op2->Run(scope, cpu_place);
X
Xin Pan 已提交
266
  // kerne_sub_type = 1, hence cpu_kernel2 is called, cpu_kernel is not called.
X
Xin Pan 已提交
267 268
  ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1);
  ASSERT_EQ(paddle::framework::cpu_kernel2_run_num, 1);
Q
Qiao Longfei 已提交
269
}
Y
Yan Chunwei 已提交
270

F
fengjiayi 已提交
271
// Register the multi-input variant, reusing OpWithKernelTest with the
// duplicable-slot proto maker and its dedicated CPU kernel.
REGISTER_OP_WITHOUT_GRADIENT(
    op_multi_inputs_with_kernel,
    paddle::framework::OpWithKernelTest,
    paddle::framework::OpKernelTestMultiInputsProtoAndCheckerMaker);
REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel,
                       paddle::framework::CPUKernalMultiInputsTest);

// test with multi inputs
TEST(OpKernel, multi_inputs) {
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;

  op_desc.set_type("op_multi_inputs_with_kernel");
  BuildVar("xs", {"x0", "x1", "x2"}, op_desc.add_inputs());
  BuildVar("k", {"k0"}, op_desc.add_inputs());
  BuildVar("ys", {"y0", "y1"}, op_desc.add_outputs());

  auto attr = op_desc.mutable_attrs()->Add();
  attr->set_name("scale");
  attr->set_type(paddle::framework::proto::AttrType::FLOAT);
  attr->set_f(3.14);

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;
  // Materialize every referenced variable as a LoDTensor.
  for (const char* name : {"x0", "x1", "x2", "k0", "y0", "y1"}) {
    scope.Var(name)->GetMutable<paddle::framework::LoDTensor>();
  }

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  op->Run(scope, cpu_place);
}
305

M
minqiyang 已提交
306
TEST(VarNameTest, all) {
  // Round-trip helper: GradVarName must append "@GRAD", GradOriginalVarName
  // must strip it back off, and stripping must be idempotent.
  auto check_round_trip = [](const std::string& name,
                             const std::string& expected_grad) {
    std::string grad = paddle::framework::GradVarName(name);
    ASSERT_EQ(grad, expected_grad);
    std::string original = paddle::framework::GradOriginalVarName(grad);
    ASSERT_EQ(original, name);
    original = paddle::framework::GradOriginalVarName(original);
    ASSERT_EQ(original, name);
  };

  check_round_trip("X", "X@GRAD");
  check_round_trip("XYZ", "XYZ@GRAD");
  // Empty name: the gradient suffix alone round-trips to the empty string.
  check_round_trip("", "@GRAD");
}
332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347

namespace paddle {
namespace framework {

class IndicateLoDTensorDataTypeTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {}
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
    auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "LoDTensor");
    return framework::OpKernelType(data_type, ctx.device_context());
  }
};
348

349 350 351
class IndicateLoDTensorDataTypeTestProtoMaker : public OpProtoAndCheckerMaker {
 public:
  void Make() {
352
    AddInput("LoDTensor", "Input of phi::DenseTensor type Variable.");
T
tianshuo78520a 已提交
353
    AddComment("This Op is only for IndicateVarDataType interface test.");
354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374
  }
};

class IndicateSelectedRowsDataTypeTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {}
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
    auto data_type =
        OperatorWithKernel::IndicateVarDataType(ctx, "SelectedRows");
    return framework::OpKernelType(data_type, ctx.device_context());
  }
};
class IndicateSelectedRowsDataTypeTestProtoMaker
    : public OpProtoAndCheckerMaker {
 public:
  void Make() {
    AddInput("SelectedRows", "Input of SelectedRows type Variable.");
T
tianshuo78520a 已提交
375
    AddComment("This Op is only for IndicateVarDataType interface test.");
376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394
  }
};

class IndicateOtherDataTypeTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {}
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
    auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Other");
    return framework::OpKernelType(data_type, ctx.device_context());
  }
};
class IndicateOtherDataTypeTestProtoMaker : public OpProtoAndCheckerMaker {
 public:
  void Make() {
    AddInput("Other", "Input of Other type Variable");
T
tianshuo78520a 已提交
395
    AddComment("This Op is only for IndicateVarDataType interface test.");
396 397 398 399
  }
};

template <typename DeviceContext, typename T>
400
class EmptyTestKernel : public OpKernel<T> {
401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416
 public:
  void Compute(const ExecutionContext& ctx) const {}
};

}  // namespace framework
}  // namespace paddle

// Register the three IndicateVarDataType ops, each with a no-op CPU kernel.
REGISTER_OP_WITHOUT_GRADIENT(
    indicate_lod_tensor_data_type_test,
    paddle::framework::IndicateLoDTensorDataTypeTest,
    paddle::framework::IndicateLoDTensorDataTypeTestProtoMaker);
REGISTER_OP_WITHOUT_GRADIENT(
    indicate_selected_rows_data_type_test,
    paddle::framework::IndicateSelectedRowsDataTypeTest,
    paddle::framework::IndicateSelectedRowsDataTypeTestProtoMaker);
REGISTER_OP_WITHOUT_GRADIENT(
    indicate_other_data_type_test,
    paddle::framework::IndicateOtherDataTypeTest,
    paddle::framework::IndicateOtherDataTypeTestProtoMaker);

REGISTER_OP_CPU_KERNEL(
    indicate_lod_tensor_data_type_test,
    paddle::framework::EmptyTestKernel<phi::CPUContext, int>);
REGISTER_OP_CPU_KERNEL(
    indicate_selected_rows_data_type_test,
    paddle::framework::EmptyTestKernel<phi::CPUContext, int>);
REGISTER_OP_CPU_KERNEL(
    indicate_other_data_type_test,
    paddle::framework::EmptyTestKernel<phi::CPUContext, int>);
430 431

TEST(IndicateVarDataTypeTest, lodtensor) {
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("indicate_lod_tensor_data_type_test");
  BuildVar("LoDTensor", {"lodtensor_1"}, op_desc.add_inputs());

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  // The variable exists but its tensor is never initialized, so kernel-type
  // deduction must fail with a descriptive error.
  scope.Var("lodtensor_1")->GetMutable<paddle::framework::LoDTensor>();

  bool caught = false;
  try {
    op->Run(scope, cpu_place);
  } catch (paddle::platform::EnforceNotMet& err) {
    caught = true;
    std::string ex_msg = err.what();
    EXPECT_TRUE(
        ex_msg.find(
            "The indicate_lod_tensor_data_type_test Op's Input Variable "
            "`LoDTensor` contains uninitialized phi::DenseTensor.") !=
        std::string::npos);
  }
  ASSERT_TRUE(caught);
}

TEST(IndicateVarDataTypeTest, selectedrows) {
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("indicate_selected_rows_data_type_test");
  BuildVar("SelectedRows", {"selected_rows_1"}, op_desc.add_inputs());

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  // SelectedRows variable exists but holds no initialized value tensor.
  scope.Var("selected_rows_1")->GetMutable<phi::SelectedRows>();

  bool caught = false;
  try {
    op->Run(scope, cpu_place);
  } catch (paddle::platform::EnforceNotMet& err) {
    caught = true;
    std::string ex_msg = err.what();
    EXPECT_TRUE(
        ex_msg.find("The indicate_selected_rows_data_type_test Op's "
                    "Input Variable `SelectedRows` contains uninitialized "
                    "phi::DenseTensor.") != std::string::npos);
  }
  ASSERT_TRUE(caught);
}

TEST(IndicateVarDataTypeTest, other) {
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("indicate_other_data_type_test");
  BuildVar("Other", {"lod_rank_table_1"}, op_desc.add_inputs());

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  // A LoDRankTable is none of the types data-type deduction understands.
  scope.Var("lod_rank_table_1")->GetMutable<paddle::framework::LoDRankTable>();

  bool caught = false;
  try {
    op->Run(scope, cpu_place);
  } catch (paddle::platform::EnforceNotMet& err) {
    caught = true;
    std::string ex_msg = err.what();
    EXPECT_TRUE(
        ex_msg.find(
            "The Input Variable(Other) of "
            "(indicate_other_data_type_test) Operator used to "
            "determine kernel data type "
            "is empty or not LoDTensor or SelectedRows or LoDTensorArray.") !=
        std::string::npos);
  }
  ASSERT_TRUE(caught);
}
515

H
hong 已提交
516
TEST(ExecutionContextAttrAndInOut, new_api) {
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("test_operator");
  BuildVar("input", {"IN1"}, op_desc.add_inputs());
  BuildVar("output", {"OUT1"}, op_desc.add_outputs());

  auto attr = op_desc.mutable_attrs()->Add();
  attr->set_name("scale");
  attr->set_type(paddle::framework::proto::AttrType::FLOAT);
  attr->set_f(3.14);

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  scope.Var("OUT1")->GetMutable<paddle::framework::LoDTensorArray>();

  // Construct an ExecutionContext by hand and exercise its accessor API
  // without actually running the op.
  auto* dev_ctx =
      paddle::platform::DeviceContextPool::Instance().Get(cpu_place);

  paddle::framework::RuntimeContext ctx({}, {});
  paddle::framework::ExecutionContext exe_context(
      *(op.get()), scope, *dev_ctx, ctx);

  ASSERT_EQ(exe_context.InputSize("input"), 1u);
  ASSERT_EQ(exe_context.OutputSize("output"), 1u);

  auto attr_map = exe_context.Attrs();
  ASSERT_EQ(PADDLE_GET(float, attr_map["scale"]), 3.14f);
  ASSERT_EQ(exe_context.Type(), "test_operator");
}

551 552 553 554 555 556 557 558 559
namespace paddle {
namespace framework {

class GetLoDLevelTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
560 561 562 563
    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "GetLoDLevelTest");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "GetLoDLevelTest");

    auto lod_level = ctx->GetLoDLevel("X");
564 565
    PADDLE_ENFORCE_GT(lod_level,
                      0,
566 567
                      paddle::platform::errors::InvalidArgument(
                          "The LoD level Input(X) should be larger than 0."));
568 569 570 571 572 573 574 575 576
  }
};

class SetLoDLevelTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
577 578
    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SetLoDLevelTest");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SetLoDLevelTest");
579 580 581 582 583 584 585 586 587
    ctx->SetLoDLevel("Out", 1);
  }
};

class GetSetLoDLevelTestMaker : public OpProtoAndCheckerMaker {
 public:
  void Make() {
    AddInput("X", "(LoDTensor) Input Variable.");
    AddOutput("Out", "(LoDTensor) Output Variable.");
T
tianshuo78520a 已提交
588
    AddComment("This Op is only for Get/SetLoDLevel interface test.");
589 590 591 592 593 594 595 596 597
  }
};

}  // namespace framework
}  // namespace paddle

// Register both lod-level ops with no-op float CPU kernels.
REGISTER_OP_WITHOUT_GRADIENT(get_lod_level_test,
                             paddle::framework::GetLoDLevelTest,
                             paddle::framework::GetSetLoDLevelTestMaker);
REGISTER_OP_CPU_KERNEL(
    get_lod_level_test,
    paddle::framework::EmptyTestKernel<phi::CPUContext, float>);

REGISTER_OP_WITHOUT_GRADIENT(set_lod_level_test,
                             paddle::framework::SetLoDLevelTest,
                             paddle::framework::GetSetLoDLevelTestMaker);
REGISTER_OP_CPU_KERNEL(
    set_lod_level_test,
    paddle::framework::EmptyTestKernel<phi::CPUContext, float>);
608 609

void SetGetLoDLevelTestMain(std::string op_type) {
610
  paddle::framework::InitDevices({});
611 612 613 614 615 616 617 618 619 620 621
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type(op_type);
  BuildVar("X", {"x.0"}, op_desc.add_inputs());
  BuildVar("Out", {"out.0"}, op_desc.add_outputs());

  paddle::platform::CPUPlace place;
  paddle::framework::Scope scope;

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  auto* x_var = scope.Var("x.0");
  auto* x = x_var->GetMutable<paddle::framework::LoDTensor>();
622
  x->mutable_data<float>(phi::make_ddim({64}), place);
623 624 625 626 627 628 629 630 631 632 633 634
  auto* out_var = scope.Var("out.0");
  out_var->GetMutable<paddle::framework::LoDTensor>();

  bool caught = false;
  std::string err_str =
      (op_type == "get_lod_level_test") ? "GetLoDLevel" : "SetLoDLevel";
  err_str +=
      " is only used in compile time. The calculation of output's actual lod "
      "is different among operators so that should be set in the runtime "
      "kernel.";
  try {
    op->Run(scope, place);
Z
Zeng Jinle 已提交
635
  } catch (paddle::platform::EnforceNotMet& err) {
636 637 638 639 640 641 642 643 644 645
    caught = true;
    std::string ex_msg = err.what();
    EXPECT_TRUE(ex_msg.find(err_str) != std::string::npos);
  }
  ASSERT_TRUE(caught);
}

TEST(GetLoDLevelTest, base) { SetGetLoDLevelTestMain("get_lod_level_test"); }

TEST(SetLoDLevelTest, base) { SetGetLoDLevelTestMain("set_lod_level_test"); }
646 647 648 649 650 651 652 653 654 655 656 657

namespace paddle {
namespace framework {

// Operator used to exercise the unused-variable check.
class OpUnusedVarTest : public OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {}
  OpKernelType GetExpectedKernelType(
      const ExecutionContext& ctx) const override {
    return OpKernelType(proto::VarType::FP32,
                        ctx.GetPlace(),
                        framework::DataLayout::kAnyLayout);
  }
};

class OpUnusedVarTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
 public:
  void Make() {
    AddInput("X", "input of test op");
    AddOutput("Y", "output of test op");
    AddComment("This is test op for unused var check.");
  }
};

// Kernel that never touches its input tensor "X"; with
// FLAGS_enable_unused_var_check on, running it must be reported as an error.
template <typename T>
class OpWithUnusedVarKernelTest : public OpKernel<T> {
 public:
  void Compute(const ExecutionContext& ctx) const {
    ASSERT_EQ(ctx.InputName("X"), "X");
    ASSERT_EQ(ctx.OutputName("Y"), "Y");
  }
};

// Kernel that reads "X" and writes "Y", so the unused-var check passes.
template <typename T>
class OpWithoutUnusedVarKernelTest : public OpKernel<T> {
 public:
  void Compute(const ExecutionContext& ctx) const {
    ASSERT_EQ(ctx.InputName("X"), "X");
    ASSERT_EQ(ctx.OutputName("Y"), "Y");
    auto* x = ctx.Input<phi::DenseTensor>("X");
    auto* y = ctx.Output<phi::DenseTensor>("Y");
    ASSERT_NE(x, y);
    ASSERT_NE(y, nullptr);
  }
};

}  // namespace framework
}  // namespace paddle

// Same operator type registered twice, once with the kernel that ignores its
// input and once with the kernel that uses it.
REGISTER_OP_WITHOUT_GRADIENT(
    op_with_unused_var,
    paddle::framework::OpUnusedVarTest,
    paddle::framework::OpUnusedVarTestProtoAndCheckerMaker);

REGISTER_OP_CPU_KERNEL(op_with_unused_var,
                       paddle::framework::OpWithUnusedVarKernelTest<float>);

REGISTER_OP_WITHOUT_GRADIENT(
    op_without_unused_var,
    paddle::framework::OpUnusedVarTest,
    paddle::framework::OpUnusedVarTestProtoAndCheckerMaker);

REGISTER_OP_CPU_KERNEL(op_without_unused_var,
                       paddle::framework::OpWithoutUnusedVarKernelTest<float>);

// test with single input
TEST(OpWithUnusedVar, all) {
  // enable the unused_var_check
  FLAGS_enable_unused_var_check = true;
  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("op_with_unused_var");
  BuildVar("X", {"X"}, op_desc.add_inputs());
  BuildVar("Y", {"Y"}, op_desc.add_outputs());

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;
  auto* x = scope.Var("X")->GetMutable<paddle::framework::LoDTensor>();
  auto* y = scope.Var("Y")->GetMutable<paddle::framework::LoDTensor>();
  x->Resize({32, 64});
  y->Resize({32, 64});
  x->mutable_data<float>(cpu_place);
  y->mutable_data<float>(cpu_place);

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  // The kernel never reads "X", so the check must throw.
  ASSERT_THROW(op->Run(scope, cpu_place), paddle::platform::EnforceNotMet);
  FLAGS_enable_unused_var_check = false;
}

TEST(OpWithoutUnusedVar, all) {
  // enable the unused_var_check
  FLAGS_enable_unused_var_check = true;

  paddle::framework::InitDevices();
  paddle::framework::proto::OpDesc op_desc;
  op_desc.set_type("op_without_unused_var");
  BuildVar("X", {"X"}, op_desc.add_inputs());
  BuildVar("Y", {"Y"}, op_desc.add_outputs());

  paddle::platform::CPUPlace cpu_place;
  paddle::framework::Scope scope;
  auto* x = scope.Var("X")->GetMutable<paddle::framework::LoDTensor>();
  auto* y = scope.Var("Y")->GetMutable<paddle::framework::LoDTensor>();
  x->Resize({32, 64});
  y->Resize({32, 64});
  x->mutable_data<float>(cpu_place);
  y->mutable_data<float>(cpu_place);

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  // The kernel consumes every declared input/output, so no error is raised.
  ASSERT_NO_THROW(op->Run(scope, cpu_place));
  FLAGS_enable_unused_var_check = false;
}