diff --git a/paddle/fluid/operators/cinn/cinn_instruction_run_op_test.cc b/paddle/fluid/operators/cinn/cinn_instruction_run_op_test.cc index 9ab49f8cdac8291472f570699c8fe3e91f04dc5a..47f8973514913a7e6a5caa8de43ab5bea41a41b9 100644 --- a/paddle/fluid/operators/cinn/cinn_instruction_run_op_test.cc +++ b/paddle/fluid/operators/cinn/cinn_instruction_run_op_test.cc @@ -39,92 +39,85 @@ namespace paddle::operators { using framework::paddle2cinn::CinnCompiler; -TEST(CinnInstructionOpTest, TestWithElementwiseAdd) { - paddle::framework::InitDevices(); - platform::SetNumThreads(1); - // cache test graph into CinnCompiler - const std::string& test_op_out_name = "cinn_instruction_run_op_out"; - const std::string& add_op_out_name = "add_op_out"; - auto compilation_key = CinnCompiler::GetInstance()->AddGraph( - CreateOnlyElementwiseAddGraph("x", "y", test_op_out_name)); - - // create necessary ops - auto cinn_instruction_run_op = paddle::framework::OpRegistry::CreateOp( - "cinn_instruction_run", - {{"X", {"x", "y"}}}, - {{"Out", {test_op_out_name}}}, - {{"cached_index", 0}, {"instruction_index", 0}}); - - auto cinn_launch_op = paddle::framework::OpRegistry::CreateOp( - "cinn_launch", - {{"X", {"x", "y"}}}, - {{"Out", {test_op_out_name}}}, - {{"compilation_key", compilation_key}}); - - // check case: a compiled object not cached before cinn_launch_op run, - // so a cinn_instruction_run_op will throw an error - framework::Scope scope; - platform::CPUPlace place; - InitVariablesWithRandomValue({"x", "y"}, {10, 20}, place, &scope); - scope.Var(test_op_out_name)->GetMutable(); - ASSERT_THROW(cinn_instruction_run_op->Run(scope, place), - paddle::platform::EnforceNotMet); - // run cinn_launch_op firstly to launch the compilation - // of the above graph and cache two compiled results - // of both type float and int - cinn_launch_op->Run(scope, place); - scope.EraseVars({"x", "y", test_op_out_name}); - scope.Var(test_op_out_name)->GetMutable(); - InitVariablesWithRandomValue({"x", "y"}, 
{30, 40}, place, &scope); - cinn_launch_op->Run(scope, place); - - // Run ops and check the computation results - auto run_and_check_fn = [&](const platform::Place& place) { - framework::Scope scope; - scope.Var(test_op_out_name)->GetMutable(); - scope.Var(add_op_out_name)->GetMutable(); - auto elementwise_add_op = +class TestCinnInstructionRunOp : public ::testing::Test { + public: + const char* test_op_out_name = "test_op_out"; + const char* add_op_out_name = "add_op_out"; + std::unique_ptr cinn_launch_op; + std::unique_ptr cinn_instruction_run_op; + std::unique_ptr elementwise_add_op; + + void SetUp() override { + auto compilation_key = CinnCompiler::GetInstance()->AddGraph( + CreateOnlyElementwiseAddGraph("x", "y", test_op_out_name)); + + // create necessary ops + cinn_launch_op = paddle::framework::OpRegistry::CreateOp( + "cinn_launch", + {{"X", {"x", "y"}}}, + {{"Out", {test_op_out_name}}}, + {{"compilation_key", compilation_key}}); + cinn_instruction_run_op = paddle::framework::OpRegistry::CreateOp( + "cinn_instruction_run", + {{"X", {"x", "y"}}}, + {{"Out", {test_op_out_name}}}, + {{"cached_index", 0}, {"instruction_index", 0}}); + elementwise_add_op = paddle::framework::OpRegistry::CreateOp("elementwise_add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {add_op_out_name}}}, {{}}); + } + + void Compile(const platform::Place& place) { + // check case: a compiled object not cached before cinn_launch_op run, + // so a cinn_instruction_run_op will throw an error + framework::Scope scope; + InitVariablesWithRandomValue({"x", "y"}, {10, 20}, place, &scope); + scope.Var(test_op_out_name)->GetMutable(); + ASSERT_THROW(cinn_instruction_run_op->Run(scope, place), + paddle::platform::EnforceNotMet); + + // run cinn_launch_op firstly to launch the compilation + // of the above graph and cache two compiled results + // of both type float and int + cinn_launch_op->Run(scope, place); + scope.EraseVars({"x", "y", test_op_out_name}); + scope.Var(test_op_out_name)->GetMutable(); + 
InitVariablesWithRandomValue<int>({"x", "y"}, {30, 40}, place, &scope); +    cinn_launch_op->Run(scope, place); +  }
+TEST_F(TestCinnInstructionRunOp, GPU) { + platform::CUDAPlace place; + Compile(place); + RunAndCheck(place); + RunAndCheck(place); } +#endif } // namespace paddle::operators diff --git a/paddle/fluid/operators/cinn/cinn_launch_op_test.cc b/paddle/fluid/operators/cinn/cinn_launch_op_test.cc index 5b965573deefa6546fdf5d2c5786aa5e76284397..b26c97dda182e797434a54f5c7f7719e7b46e549 100644 --- a/paddle/fluid/operators/cinn/cinn_launch_op_test.cc +++ b/paddle/fluid/operators/cinn/cinn_launch_op_test.cc @@ -49,7 +49,7 @@ using framework::paddle2cinn::CinnCompiler; class TestCinnLaunchOp : public ::testing::Test { public: - const char* test_op_out_name = "add_op_out"; + const char* test_op_out_name = "test_op_out"; const char* add_op_out_name = "add_op_out"; std::unique_ptr cinn_launch_op; std::unique_ptr elementwise_add_op; diff --git a/paddle/fluid/operators/cinn/test_helper.h b/paddle/fluid/operators/cinn/test_helper.h index 3ed72f7fe2f1c157883339c71779cad4f71b26d3..eed1b72420bfdf199f2bd2e1682816e5196368cb 100644 --- a/paddle/fluid/operators/cinn/test_helper.h +++ b/paddle/fluid/operators/cinn/test_helper.h @@ -102,6 +102,7 @@ void InitVariablesWithRandomValue(const std::vector& var_names, tmp_tensor.mutable_data(common_ddim, platform::CPUPlace()); for (const auto& var_name : var_names) { auto* tensor = scope->Var(var_name)->GetMutable(); + tensor->mutable_data(common_ddim, place); for (auto i = 0; i < tensor->numel(); ++i) { tmp_data[i] = static_cast(dist(engine)); } @@ -121,9 +122,9 @@ void CompareOpResult(Variable* test_out, Variable* expected_out) { ASSERT_TRUE(expected_tensor.IsInitialized()); ASSERT_EQ(test_tensor.dims(), expected_tensor.dims()); const auto* test_data = test_tensor.data(); - const auto* excepted_data = expected_tensor.data(); + const auto* expected_data = expected_tensor.data(); for (auto i = 0; i < expected_tensor.numel(); ++i) { - EXPECT_EQ(test_data[i], excepted_data[i]); + EXPECT_EQ(test_data[i], expected_data[i]); } }