未验证 提交 a4a9ce0e 编写于 作者: T TeFeng Chen 提交者: GitHub

fix cinn_instruction_run_op_test when FLAGS_use_system_allocator=True (#47731)

上级 50c3632f
......@@ -39,92 +39,85 @@ namespace paddle::operators {
using framework::paddle2cinn::CinnCompiler;
TEST(CinnInstructionOpTest, TestWithElementwiseAdd) {
paddle::framework::InitDevices();
platform::SetNumThreads(1);
// cache test graph into CinnCompiler
const std::string& test_op_out_name = "cinn_instruction_run_op_out";
const std::string& add_op_out_name = "add_op_out";
auto compilation_key = CinnCompiler::GetInstance()->AddGraph(
CreateOnlyElementwiseAddGraph("x", "y", test_op_out_name));
// create necessary ops
auto cinn_instruction_run_op = paddle::framework::OpRegistry::CreateOp(
"cinn_instruction_run",
{{"X", {"x", "y"}}},
{{"Out", {test_op_out_name}}},
{{"cached_index", 0}, {"instruction_index", 0}});
auto cinn_launch_op = paddle::framework::OpRegistry::CreateOp(
"cinn_launch",
{{"X", {"x", "y"}}},
{{"Out", {test_op_out_name}}},
{{"compilation_key", compilation_key}});
// check case: a compiled object not cached before cinn_launch_op run,
// so a cinn_instruction_run_op will throw an error
framework::Scope scope;
platform::CPUPlace place;
InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, &scope);
scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
ASSERT_THROW(cinn_instruction_run_op->Run(scope, place),
paddle::platform::EnforceNotMet);
// run cinn_launch_op firstly to launch the compilation
// of the above graph and cache two compiled results
// of both type float and int
cinn_launch_op->Run(scope, place);
scope.EraseVars({"x", "y", test_op_out_name});
scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
InitVariablesWithRandomValue<int>({"x", "y"}, {30, 40}, place, &scope);
cinn_launch_op->Run(scope, place);
// Run ops and check the computation results
auto run_and_check_fn = [&](const platform::Place& place) {
framework::Scope scope;
scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
scope.Var(add_op_out_name)->GetMutable<LoDTensor>();
auto elementwise_add_op =
class TestCinnInstructionRunOp : public ::testing::Test {
public:
const char* test_op_out_name = "test_op_out";
const char* add_op_out_name = "add_op_out";
std::unique_ptr<framework::OperatorBase> cinn_launch_op;
std::unique_ptr<framework::OperatorBase> cinn_instruction_run_op;
std::unique_ptr<framework::OperatorBase> elementwise_add_op;
// Builds the shared test state: caches a single elementwise-add graph in
// CinnCompiler and creates the three operators exercised by the tests
// (cinn_launch to compile, cinn_instruction_run to execute one compiled
// instruction, and elementwise_add as the reference implementation).
void SetUp() override {
// Register the graph with CinnCompiler; the returned key identifies the
// cached graph when cinn_launch_op runs.
auto compilation_key = CinnCompiler::GetInstance()->AddGraph(
CreateOnlyElementwiseAddGraph("x", "y", test_op_out_name));
// create necessary ops
cinn_launch_op = paddle::framework::OpRegistry::CreateOp(
"cinn_launch",
{{"X", {"x", "y"}}},
{{"Out", {test_op_out_name}}},
{{"compilation_key", compilation_key}});
// cached_index/instruction_index select which compiled object and which
// instruction inside it to execute; both start at the first entry.
cinn_instruction_run_op = paddle::framework::OpRegistry::CreateOp(
"cinn_instruction_run",
{{"X", {"x", "y"}}},
{{"Out", {test_op_out_name}}},
{{"cached_index", 0}, {"instruction_index", 0}});
// Reference op: computes the same add so results can be compared.
elementwise_add_op =
paddle::framework::OpRegistry::CreateOp("elementwise_add",
{{"X", {"x"}}, {"Y", {"y"}}},
{{"Out", {add_op_out_name}}},
{{}});
}
// Populates the CinnCompiler cache for `place` by running cinn_launch_op
// twice — once with float inputs and once with int inputs — producing two
// cached compiled results (cached_index 0 and 1). Also verifies that
// running cinn_instruction_run_op before anything is cached raises an
// enforce error.
void Compile(const platform::Place& place) {
// check case: a compiled object not cached before cinn_launch_op run,
// so a cinn_instruction_run_op will throw an error
framework::Scope scope;
InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, &scope);
scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
ASSERT_THROW(cinn_instruction_run_op->Run(scope, place),
paddle::platform::EnforceNotMet);
// run cinn_launch_op firstly to launch the compilation
// of the above graph and cache two compiled results
// of both type float and int
cinn_launch_op->Run(scope, place);
// Re-create the variables with int data so the second launch compiles
// (and caches) an int variant of the same graph.
scope.EraseVars({"x", "y", test_op_out_name});
scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
InitVariablesWithRandomValue<int>({"x", "y"}, {30, 40}, place, &scope);
cinn_launch_op->Run(scope, place);
}
// 1. check on type float
// Executes the cached compiled instruction (cached_index 0, the float
// variant) on fresh random float inputs and checks its output against the
// reference elementwise_add op.
void RunAndCheck(const platform::Place& place) {
// Run ops and check the computation results
framework::Scope scope;
InitVariablesWithRandomValue<float>({"x", "y"}, {10, 20}, place, &scope);
// Select the first cached compiled object (the float compilation).
cinn_instruction_run_op->SetAttr("cached_index", 0);
cinn_instruction_run_op->Run(scope, place);
scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
scope.Var(add_op_out_name)->GetMutable<LoDTensor>();
elementwise_add_op->Run(scope, place);
cinn_launch_op->Run(scope, place);
// Output of the cinn instruction must match the reference add result.
CompareOpResult<float>(scope.GetVar(test_op_out_name),
scope.GetVar(add_op_out_name));
}
// 2. check on type int to indicate cinn_instruction_run op
// can mutable data according compiled result
scope.EraseVars({"x", "y", test_op_out_name, add_op_out_name});
scope.Var(test_op_out_name)->GetMutable<LoDTensor>();
scope.Var(add_op_out_name)->GetMutable<LoDTensor>();
// Clear CinnCompiler's cache between tests so cached_index values do not
// shift across TEST_F runs sharing the singleton compiler.
void TearDown() override { CinnCompiler::GetInstance()->Clear(); }
};
InitVariablesWithRandomValue<int>({"x", "y"}, {30, 40}, place, &scope);
cinn_instruction_run_op->SetAttr("cached_index", 1);
cinn_instruction_run_op->Run(scope, place);
// need reconstruct elementwise_add_op to choose a new kernel with type int
elementwise_add_op =
paddle::framework::OpRegistry::CreateOp("elementwise_add",
{{"X", {"x"}}, {"Y", {"y"}}},
{{"Out", {add_op_out_name}}},
{{}});
elementwise_add_op->Run(scope, place);
CompareOpResult<int>(scope.GetVar(test_op_out_name),
scope.GetVar(add_op_out_name));
};
// Compile the graph on CPU, then execute it twice: the repeated call to
// RunAndCheck verifies the compiled-object cache is reused correctly.
TEST_F(TestCinnInstructionRunOp, CPU) {
  const platform::CPUPlace cpu_place;
  Compile(cpu_place);
  RunAndCheck(cpu_place);
  RunAndCheck(cpu_place);
}
// CPU
run_and_check_fn(platform::CPUPlace());
run_and_check_fn(platform::CPUPlace());
#ifdef PADDLE_WITH_CUDA
// GPU
run_and_check_fn(platform::CUDAPlace());
run_and_check_fn(platform::CUDAPlace());
#endif
// Same flow as the CPU test, but on the default CUDA device; the second
// execution exercises the cached-compilation path.
TEST_F(TestCinnInstructionRunOp, GPU) {
  const platform::CUDAPlace cuda_place;
  Compile(cuda_place);
  RunAndCheck(cuda_place);
  RunAndCheck(cuda_place);
}
#endif
} // namespace paddle::operators
......@@ -49,7 +49,7 @@ using framework::paddle2cinn::CinnCompiler;
class TestCinnLaunchOp : public ::testing::Test {
public:
const char* test_op_out_name = "add_op_out";
const char* test_op_out_name = "test_op_out";
const char* add_op_out_name = "add_op_out";
std::unique_ptr<framework::OperatorBase> cinn_launch_op;
std::unique_ptr<framework::OperatorBase> elementwise_add_op;
......
......@@ -102,6 +102,7 @@ void InitVariablesWithRandomValue(const std::vector<std::string>& var_names,
tmp_tensor.mutable_data<DataType>(common_ddim, platform::CPUPlace());
for (const auto& var_name : var_names) {
auto* tensor = scope->Var(var_name)->GetMutable<LoDTensor>();
tensor->mutable_data<DataType>(common_ddim, place);
for (auto i = 0; i < tensor->numel(); ++i) {
tmp_data[i] = static_cast<DataType>(dist(engine));
}
......@@ -121,9 +122,9 @@ void CompareOpResult(Variable* test_out, Variable* expected_out) {
ASSERT_TRUE(expected_tensor.IsInitialized());
ASSERT_EQ(test_tensor.dims(), expected_tensor.dims());
const auto* test_data = test_tensor.data<DataType>();
const auto* excepted_data = expected_tensor.data<DataType>();
const auto* expected_data = expected_tensor.data<DataType>();
for (auto i = 0; i < expected_tensor.numel(); ++i) {
EXPECT_EQ(test_data[i], excepted_data[i]);
EXPECT_EQ(test_data[i], expected_data[i]);
}
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册