未验证 提交 228eb898 编写于 作者: C CtfGo 提交者: GitHub

cinn_launch_op: skip checking input variables must be used (#37119)

Modify several implementations of CinnLaunchOp:
1. Skip the check that every input variable must be used
2. Move the current helper functions into a CinnLaunchContext
上级 6486e242
......@@ -171,6 +171,7 @@ endif()
if (WITH_CINN)
# Build the CINN launch operator (CPU + CUDA sources) against the CINN
# compiler libraries, plus its unit test.
op_library(cinn_launch_op SRCS cinn_launch_op.cc cinn_launch_op.cu.cc DEPS transform_desc cinn_compiler cinn ${OP_HEADER_DEPS})
cc_test(cinn_launch_op_test SRCS cinn_launch_op_test.cc DEPS cinn_compiler cinn_launch_op elementwise_add_op)
# Restrict OpenMP to one thread for the test — presumably to keep the run
# lightweight and stable; TODO confirm the original motivation.
set_tests_properties(cinn_launch_op_test PROPERTIES ENVIRONMENT OMP_NUM_THREADS=1)
endif()
# FIXME(typhoonzero): operator deps may not needed.
......
......@@ -62,90 +62,102 @@ void DebugCinnCompiledResult(const CinnCompiledObject& result) {
<< "]";
}
std::vector<std::string> MapPaddleVariablesToCinn(
const std::vector<std::string>& paddle_names,
const std::unordered_map<std::string, std::string>& paddle2cinn_varmap) {
std::vector<std::string> result;
result.reserve(result.size());
// Runs the compiled CINN runtime program using the context's finalized
// argument map. FinalizeArguments() enforces that every CINN variable has
// an argument assigned before execution starts.
void LaunchCinnExecution(const CinnCompiledObject& compiled_obj,
const CinnLaunchContext& context) {
compiled_obj.runtime_program->Execute(&context.FinalizeArguments());
}
CinnLaunchContext::CinnLaunchContext(const CinnCompiledObject& compiled_obj)
: paddle2cinn_varmap_(compiled_obj.paddle2cinn_varmap),
cinn_scope_(compiled_obj.scope) {
auto var_names = cinn_scope_->var_names();
cinn_variable_names_.reserve(var_names.size());
std::transform(
paddle_names.begin(), paddle_names.end(), std::back_inserter(result),
[&paddle2cinn_varmap](const std::string& pd_name) {
PADDLE_ENFORCE_GT(paddle2cinn_varmap.count(pd_name), 0,
platform::errors::NotFound(
"Not found the corresponding cinn variable "
"of paddle variable(%s) in compilation result.",
pd_name));
return paddle2cinn_varmap.at(pd_name);
});
return result;
var_names.begin(), var_names.end(),
std::inserter(cinn_variable_names_, cinn_variable_names_.end()),
[](const auto& name_view) { return std::string(name_view.data()); });
}
std::vector<CinnTensor> GetCinnTensorsFromCompiledScope(
const std::vector<std::string>& cinn_names, const CinnScope& cinn_scope) {
std::vector<CinnTensor> result;
result.reserve(cinn_names.size());
std::transform(cinn_names.begin(), cinn_names.end(),
std::back_inserter(result),
[&cinn_scope](const std::string& var_name) {
PADDLE_ENFORCE_NOT_NULL(
cinn_scope.FindVar(var_name),
platform::errors::NotFound(
"Variable(%s) not found in cinn scope.", var_name));
return cinn_scope.GetTensor(var_name);
});
return result;
// A Paddle variable counts as "used" only when it both has an entry in the
// paddle->cinn name map and its mapped CINN name actually exists in the
// compiled scope (variables may be eliminated during compilation).
bool CinnLaunchContext::IsVariableUsed(const std::string& paddle_name) {
  auto mapped = paddle2cinn_varmap_.find(paddle_name);
  if (mapped == paddle2cinn_varmap_.end()) {
    return false;
  }
  return cinn_variable_names_.count(mapped->second) > 0;
}
// Looks up the CinnTensor bound to `var_name` in the compiled CINN scope.
// Raises NotFound when the compiled program does not define the variable.
CinnTensor CinnLaunchContext::GetCinnTensor(const std::string& var_name) {
PADDLE_ENFORCE_GT(cinn_variable_names_.count(var_name), 0,
platform::errors::NotFound(
"Variable(%s) not found in cinn scope.", var_name));
return cinn_scope_->GetTensor(var_name);
}
// Lists the CINN variables that have no execution argument assigned yet —
// i.e. the program-internal temporaries the launch op must allocate itself
// after inputs and outputs have been bound via SetArgument.
std::vector<std::string> CinnLaunchContext::GetInternalVariableNames() {
  std::unordered_set<std::string> remaining(cinn_variable_names_);
  for (const auto& name_to_arg : name2argument_) {
    remaining.erase(name_to_arg.first);
  }
  return {remaining.begin(), remaining.end()};
}
// Allocates buffer space for `paddle_tensor` using the shape recorded in the
// compiled CINN scope. An external variable name is first translated through
// paddle2cinn_varmap_; an internal variable already carries its CINN name.
void CinnLaunchContext::MutableTensorData(const std::string& var_name,
                                          const platform::Place& place,
                                          LoDTensor* paddle_tensor,
                                          bool is_internal_var) {
  if (!is_internal_var) {
    // An external variable must have survived compilation, otherwise there
    // is no buffer assignment to apply.
    PADDLE_ENFORCE_EQ(IsVariableUsed(var_name), true,
                      platform::errors::InvalidArgument(
                          "Paddle variable(%s) not used by cinn", var_name));
  }
  const std::string& cinn_name =
      is_internal_var ? var_name : paddle2cinn_varmap_.at(var_name);
  auto cinn_tensor = GetCinnTensor(cinn_name);
  // TODO(CtfGo): support mutable corresponding c++ type after CINN ready
  paddle_tensor->mutable_data<float>(
      framework::make_ddim(cinn_tensor->shape().data()), place);
}
void CheckTensorEquivalent(const std::string& paddle_name,
const LoDTensor* paddle_tensor,
const CinnTensor& cinn_tensor) {
void CinnLaunchContext::CheckTensorEquivalent(const std::string& paddle_name,
const LoDTensor& paddle_tensor,
const CinnTensor& cinn_tensor) {
PADDLE_ENFORCE_EQ(
paddle_tensor->IsInitialized(), true,
paddle_tensor.IsInitialized(), true,
platform::errors::InvalidArgument(
"The tensor in variable(%s) is not initialized.", paddle_name));
"Tensor in variable(%s) is not initialized.", paddle_name));
// check dimension
auto cinn_dims = framework::make_ddim(cinn_tensor->shape().data());
PADDLE_ENFORCE_EQ(paddle_tensor->dims(), cinn_dims,
platform::errors::InvalidArgument(
"The tensor dimension in variable(%s) "
"is not equivalent, paddle is [%s] "
"but cinn is [%s].",
paddle_name, paddle_tensor->dims(), cinn_dims));
PADDLE_ENFORCE_EQ(paddle_tensor.dims(), cinn_dims,
platform::errors::PreconditionNotMet(
"Tensors' shape in variable(%s) are not equivalent, "
"paddle's shape = [%s], but cinn's shape = [%s].",
paddle_name, paddle_tensor.dims(), cinn_dims));
// TODO(CtfGo): check the underlying data type after CINN ready
}
void TensorMutableDataWithCinnInfo(const platform::Place& place,
const CinnTensor& cinn_tensor,
LoDTensor* paddle_tensor) {
// TODO(CtfGo): support mutable corresponding c++ type after CINN ready
paddle_tensor->mutable_data<float>(
framework::make_ddim(cinn_tensor->shape().data()), place);
}
std::vector<std::string> SeperateTempVar(
const CinnScope& cinn_scope,
const std::vector<std::string>& input_cinn_names,
const std::vector<std::string>& output_cinn_names) {
auto cinn_var_names = cinn_scope.var_names();
std::unordered_set<std::string> all_cinn_names;
all_cinn_names.reserve(cinn_var_names.size());
std::transform(
cinn_var_names.begin(), cinn_var_names.end(),
std::inserter(all_cinn_names, all_cinn_names.end()),
[](const auto& name_view) { return std::string(name_view.data()); });
void CinnLaunchContext::AssignExternalVariable(const std::string& paddle_name,
LoDTensor* paddle_tensor) {
PADDLE_ENFORCE_EQ(IsVariableUsed(paddle_name), true,
platform::errors::InvalidArgument(
"Paddle variable(%s) not used by cinn", paddle_name));
auto exclude_fn = [&all_cinn_names](const auto& cinn_name) {
all_cinn_names.erase(cinn_name);
};
const auto& cinn_name = paddle2cinn_varmap_.at(paddle_name);
CheckTensorEquivalent(paddle_name, *paddle_tensor, GetCinnTensor(cinn_name));
return SetArgument(cinn_name, paddle_tensor);
}
std::for_each(input_cinn_names.begin(), input_cinn_names.end(), exclude_fn);
std::for_each(output_cinn_names.begin(), output_cinn_names.end(), exclude_fn);
return {all_cinn_names.begin(), all_cinn_names.end()};
// Binds the tensor of an internal (temporary) variable — addressed directly
// by its CINN name — as an execution argument.
// Raises InvalidArgument if the compiled scope does not contain `cinn_name`
// or if the tensor disagrees with the compiled buffer assignment.
void CinnLaunchContext::AssignInternalVariable(const std::string& cinn_name,
                                               LoDTensor* paddle_tensor) {
  // Fixed typo in the error message: "socpe" -> "scope".
  PADDLE_ENFORCE_GT(cinn_variable_names_.count(cinn_name), 0,
                    platform::errors::InvalidArgument(
                        "Variable(%s) not found in cinn scope.", cinn_name));
  CheckTensorEquivalent(cinn_name, *paddle_tensor, GetCinnTensor(cinn_name));
  // Plain call instead of `return` on a void-returning function.
  SetArgument(cinn_name, paddle_tensor);
}
std::unique_ptr<cinn_buffer_t> ShareTensorWithCinnBuffer(LoDTensor* tensor) {
std::unique_ptr<cinn_buffer_t> CinnLaunchContext::ShareTensorWithCinnBuffer(
LoDTensor* tensor) {
// convert paddle dimensions array to cinn format
std::vector<cinn_dimension_t> cinn_dims(tensor->dims().size());
for (auto i = 0; i < tensor->dims().size(); ++i) {
......@@ -159,17 +171,29 @@ std::unique_ptr<cinn_buffer_t> ShareTensorWithCinnBuffer(LoDTensor* tensor) {
return cinn_buffer;
}
void CheckArgumentsNotMissed(
const CinnScope& cinn_scope,
const std::map<std::string, cinn_pod_value_t>& name2argument) {
auto cinn_var_names = cinn_scope.var_names();
std::for_each(cinn_var_names.begin(), cinn_var_names.end(),
[&name2argument](const auto& name_view) {
PADDLE_ENFORCE_GT(
name2argument.count(name_view.data()), 0,
platform::errors::InvalidArgument(
"Parameter(%s) is not assgined.", name_view.data()));
// Registers `paddle_tensor` as the execution argument named `cinn_name`:
// wraps the tensor's memory into a cinn_buffer_t, records the
// (name -> argument) pair, and keeps the buffer alive until execution ends.
void CinnLaunchContext::SetArgument(const std::string& cinn_name,
                                    LoDTensor* paddle_tensor) {
  auto shared_buffer = ShareTensorWithCinnBuffer(paddle_tensor);
  name2argument_.emplace(cinn_name, shared_buffer.get());
  // hold_buffers_ owns the buffer because cinn_pod_value_t does not.
  hold_buffers_.emplace_back(std::move(shared_buffer));
  VLOG(4) << "SetArgument-" << name2argument_.size() << ": "
          << "name(" << cinn_name << "), "
          << "type(" << framework::DataTypeToString(paddle_tensor->type())
          << "), dims(" << paddle_tensor->dims() << ").";
}
const std::map<std::string, cinn_pod_value_t>&
CinnLaunchContext::FinalizeArguments() const {
// Check all execution parameters are assigned valued.
std::for_each(cinn_variable_names_.begin(), cinn_variable_names_.end(),
[this](const auto& var_name) {
PADDLE_ENFORCE_GT(name2argument_.count(var_name), 0,
platform::errors::InvalidArgument(
"Variable(%s) is missed for launching "
"compiled program execution",
var_name));
});
return name2argument_;
}
} // namespace details
......
......@@ -17,10 +17,11 @@
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include "cinn/hlir/framework/graph_compiler.h"
#include "cinn/hlir/framework/scope.h"
#include "cinn/runtime/cinn_runtime.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/paddle2cinn/cinn_compiler.h"
......@@ -40,48 +41,75 @@ using CinnCompiledObject = framework::paddle2cinn::CinnCompiledObject;
namespace details {
// Holds everything needed to launch a compiled CINN program: the
// paddle->cinn variable name map, the compiled CINN scope, and the
// execution arguments (cinn_buffer_t) built from Paddle tensors.
class CinnLaunchContext {
public:
explicit CinnLaunchContext(const CinnCompiledObject& compiled_obj);
// Return whether a Paddle variable is used by the compiled kernels
bool IsVariableUsed(const std::string& var_name);
// Allocate buffer to a Paddle tensor with the buffer-assignment
// information from the CINN compilation result
void MutableTensorData(const std::string& var_name,
const platform::Place& place, LoDTensor* paddle_tensor,
bool is_internal_var = false);
// Assign tensor buffer to input or output variables
void AssignExternalVariable(const std::string& var_name, LoDTensor* tensor);
// Assign tensor buffer to internal variables
void AssignInternalVariable(const std::string& var_name, LoDTensor* tensor);
// Extract internal variable names from CinnScope
// by excluding used input and output variables
std::vector<std::string> GetInternalVariableNames();
// Finalize all execution arguments and return them
const std::map<std::string, cinn_pod_value_t>& FinalizeArguments() const;
private:
// Get CinnTensor with CINN variable name
CinnTensor GetCinnTensor(const std::string& var_name);
// Check whether tensors from Paddle and CINN of the same variable
// are equivalent in type and dimension
void CheckTensorEquivalent(const std::string& var_name,
const LoDTensor& paddle_tensor,
const CinnTensor& cinn_tensor);
// Share the buffer of a Paddle tensor to CINN by delivering memory address
// to a cinn_buffer_t object
std::unique_ptr<cinn_buffer_t> ShareTensorWithCinnBuffer(LoDTensor* tensor);
// Set an argument with (cinn name)->(paddle tensor) pair
void SetArgument(const std::string& cinn_name, LoDTensor* paddle_tensor);
private:
// a variable name map from paddle to cinn
const std::unordered_map<std::string, std::string>& paddle2cinn_varmap_;
// the variable scope of cinn
const std::shared_ptr<CinnScope> cinn_scope_;
// all variables used by compiled executable program
std::unordered_set<std::string> cinn_variable_names_;
// because a cinn_pod_value_t does not own the cinn_buffer_t object,
// extra storage is necessary to keep each buffer alive; they can
// not be released until the runtime program finishes execution.
std::vector<std::unique_ptr<cinn_buffer_t>> hold_buffers_;
// name to execution argument
std::map<std::string, cinn_pod_value_t> name2argument_;
};
// Tranform Paddle place to CINN target
const ::cinn::common::Target& PlaceToCinnTarget(const platform::Place& place);
// Print detailed compilation result of graph for debug
void DebugCinnCompiledResult(const CinnCompiledObject& result);
// Transform names of Paddle variables to CINN ones
std::vector<std::string> MapPaddleVariablesToCinn(
const std::vector<std::string>& paddle_names,
const std::unordered_map<std::string, std::string>& paddle2cinn_varmap);
// Get CinnTensor with variable name from CinnScope
std::vector<CinnTensor> GetCinnTensorsFromCompiledScope(
const std::vector<std::string>& cinn_names, const CinnScope& cinn_scope);
// Check whether tensors from Paddle and CINN respectively
// of the same variable are equivalent in type and dimension
void CheckTensorEquivalent(const std::string& paddle_name,
const LoDTensor* paddle_tensor,
const CinnTensor& cinn_tensor);
// Allocate buffer to a Paddle tensor with assginment information from CINN
void TensorMutableDataWithCinnInfo(const platform::Place& place,
const CinnTensor& cinn_tensor,
LoDTensor* paddle_tensor);
// Extract temporary variable names from CinnScope by excluding
// input and output variables
std::vector<std::string> SeperateTempVar(
const CinnScope& cinn_scope,
const std::vector<std::string>& input_cinn_names,
const std::vector<std::string>& output_cinn_names);
// Share the buffer of a Paddle tensor to CINN by packing memory address
// in a cinn_buffer_t object
std::unique_ptr<cinn_buffer_t> ShareTensorWithCinnBuffer(LoDTensor* tensor);
// Check all execution arguments are carried
void CheckArgumentsNotMissed(
const CinnScope& cinn_scope,
const std::map<std::string, cinn_pod_value_t>& name2argument);
// Launch cinn to execute compiled executable program and wait done
void LaunchCinnExecution(const CinnCompiledObject& compiled_obj,
const CinnLaunchContext& context);
} // namespace details
template <typename DeviceContext, typename T>
......@@ -117,74 +145,47 @@ class CinnLaunchOpKernel : public framework::OpKernel<T> {
compilation_key, inputs_name2tensor, target);
details::DebugCinnCompiledResult(cinn_compiled_object);
const auto& cinn_runtime_program = cinn_compiled_object.runtime_program;
const auto& cinn_scope = *(cinn_compiled_object.scope);
const auto& paddle2cinn_varmap = cinn_compiled_object.paddle2cinn_varmap;
// Step 3. Initialize all variables needed for cinn compiled runtime
// program execution, and share buffers of their tensors into
// cinn buffers through execution arguments passed.
VLOG(4) << "CinnLaunchOp initialize variables and prepare arguments";
std::map<std::string, cinn_pod_value_t> name2argument;
// because a cinn_pod_value_t does not own the cinn_buffer_t object,
// an extra stroage is necessary to keep the object and it can
// not be released until runtime program finish execution.
std::vector<std::unique_ptr<cinn_buffer_t>> hold_buffers;
// 3.1 Prepare input variables: because tensors of input variables have
auto launch_context =
std::make_unique<details::CinnLaunchContext>(cinn_compiled_object);
// Step 3. Prepare arguments needed for the compiled executable program.
VLOG(4) << "CinnLaunchOp prepare arguments";
// 3.1 Prepare input variables: tensors of input variables have
// been initialized before graph compiled, just check the
// equiality between tensors of paddle and cinn.
auto input_cinn_names = details::MapPaddleVariablesToCinn(
input_variable_names, paddle2cinn_varmap);
auto input_cinn_tensors =
details::GetCinnTensorsFromCompiledScope(input_cinn_names, cinn_scope);
for (auto i = 0; i < input_variable_names.size(); ++i) {
const auto& var_name = input_variable_names.at(i);
const auto& cinn_name = input_cinn_names.at(i);
auto* tensor = scope.GetVar(var_name)->GetMutable<LoDTensor>();
details::CheckTensorEquivalent(var_name, tensor,
input_cinn_tensors.at(i));
VLOG(4) << "Prepare input argument-" << i << ":"
<< "name(" << var_name << "->" << cinn_name << "), "
<< "tensor(type:" << tensor->type() << ","
<< "dims:" << tensor->dims() << ").";
auto buffer = details::ShareTensorWithCinnBuffer(tensor);
name2argument.emplace(input_cinn_names.at(i), buffer.get());
hold_buffers.emplace_back(std::move(buffer));
for (const auto& var_name : input_variable_names) {
if (!launch_context->IsVariableUsed(var_name)) {
// some input variables don't need for cinn because they are
// eliminated by optimized passes or some cinn operators use
// less variables
VLOG(4) << "Input variable(" << var_name << ") not used by cinn";
continue;
}
launch_context->AssignExternalVariable(
var_name, scope.GetVar(var_name)->GetMutable<LoDTensor>());
}
// 3.2 Prepare output variables: all output variables should
// be initialized and allocated buffer in advance before
// be initialized and allocated buffer before
// the runtime program start execution, the compilation result
// includes details of their buffer assginment which used by
// Paddle tensor allocation. For those variables allocated yet,
// includes details of their buffer assginment and we use that to
// allocate space in Paddle. For those variables allocated yet,
// like persistable parameters, just check the equiality between
// Paddle allocation and CINN buffer assginment.
auto output_variable_names = ctx.OutputNames(kOutputs);
auto output_cinn_names = details::MapPaddleVariablesToCinn(
output_variable_names, paddle2cinn_varmap);
auto output_cinn_tensors =
details::GetCinnTensorsFromCompiledScope(output_cinn_names, cinn_scope);
for (auto i = 0; i < output_variable_names.size(); ++i) {
const auto& var_name = output_variable_names.at(i);
const auto& cinn_name = output_cinn_names.at(i);
for (const auto var_name : output_variable_names) {
PADDLE_ENFORCE_EQ(launch_context->IsVariableUsed(var_name), true,
platform::errors::InvalidArgument(
"Output variable(%s) not used by cinn", var_name));
auto* tensor = scope.GetVar(var_name)->GetMutable<LoDTensor>();
if (tensor->IsInitialized()) {
details::CheckTensorEquivalent(var_name, tensor,
output_cinn_tensors.at(i));
} else {
details::TensorMutableDataWithCinnInfo(place, output_cinn_tensors.at(i),
tensor);
if (!tensor->IsInitialized()) {
launch_context->MutableTensorData(var_name, place, tensor);
}
VLOG(4) << "Prepare output argument-" << i << ":"
<< "name(" << var_name << "->" << cinn_name << "), "
<< "tensor(type:" << tensor->type() << ","
<< "dims:" << tensor->dims() << ").";
auto buffer = details::ShareTensorWithCinnBuffer(tensor);
name2argument.emplace(output_cinn_names.at(i), buffer.get());
hold_buffers.emplace_back(std::move(buffer));
launch_context->AssignExternalVariable(
var_name, scope.GetVar(var_name)->GetMutable<LoDTensor>());
}
// 3.3 Prepare internal or temporary variables: Create a temporary
......@@ -193,31 +194,16 @@ class CinnLaunchOpKernel : public framework::OpKernel<T> {
// Here we directly use the names from CinnScope as Paddle variable
// names, because they will not be used outside the graph
// and should be destructed after computation finished.
auto temp_variable_names = details::SeperateTempVar(
cinn_scope, input_cinn_names, output_cinn_names);
auto internal_variable_names = launch_context->GetInternalVariableNames();
auto temp_scope = scope.NewTmpScope();
if (!temp_variable_names.empty()) {
auto temp_cinn_tensors = details::GetCinnTensorsFromCompiledScope(
temp_variable_names, cinn_scope);
for (auto i = 0; i < temp_variable_names.size(); ++i) {
const auto& var_name = temp_variable_names.at(i);
auto* tensor = temp_scope->Var(var_name)->GetMutable<LoDTensor>();
details::TensorMutableDataWithCinnInfo(place, temp_cinn_tensors.at(i),
tensor);
VLOG(4) << "Prepare temporary argument-" << i << ":"
<< "name(" << var_name << "->" << var_name << "), "
<< "tensor(type:" << tensor->type() << ","
<< "dims:" << tensor->dims() << ").";
auto buffer = details::ShareTensorWithCinnBuffer(tensor);
name2argument.emplace(var_name, buffer.get());
hold_buffers.emplace_back(std::move(buffer));
}
for (const auto& var_name : internal_variable_names) {
auto* tensor = temp_scope->Var(var_name)->GetMutable<LoDTensor>();
launch_context->MutableTensorData(var_name, place, tensor, true);
launch_context->AssignInternalVariable(var_name, tensor);
}
// Step 4. Launch CINN to execute the compiled runtime program
details::CheckArgumentsNotMissed(cinn_scope, name2argument);
cinn_runtime_program->Execute(&name2argument);
// Step 4. Launch CINN to execute the compiled executable program
details::LaunchCinnExecution(cinn_compiled_object, *launch_context);
VLOG(4) << "CinnLaunchOp launch execution done.";
}
};
......
......@@ -14,6 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/cinn_launch_op.h"
#include <stdlib.h>
#include <mutex>
#include <random>
#include <string>
#include "gtest/gtest.h"
......@@ -188,114 +189,134 @@ TEST(CinnLaunchOpHelperTest, TestPlaceToCinnTarget) {
paddle::platform::EnforceNotMet);
}
TEST(CinnLaunchOpHelperTest, TestMapPaddleVariablesToCinn) {
std::unordered_map<std::string, std::string> varmap(
{{"var1", "cinn_var1"}, {"var2", "cinn_var2"}, {"var3", "cinn_var3"}});
// Builds (once, thread-safely) a shared CinnCompiledObject test fixture:
// a CINN scope with three tensors (cinn_var1/2/3) and a paddle->cinn name
// map in which "var4" maps to "cinn_var4", a name absent from the scope,
// to exercise the "mapped but unused" path.
const CinnCompiledObject& GetDefaultCompiledObj() {
static std::once_flag initialized;
static CinnCompiledObject compiled_object;
std::call_once(initialized, [&compiled_object]() {
auto& scope = compiled_object.scope;
scope = std::make_shared<CinnScope>();
scope->Var<CinnTensor>("cinn_var1");
scope->GetTensor("cinn_var1")->Resize(CinnShape({3, 4}));
scope->Var<CinnTensor>("cinn_var2");
scope->GetTensor("cinn_var2")->Resize(CinnShape({6, 7, 8}));
scope->Var<CinnTensor>("cinn_var3");
scope->GetTensor("cinn_var3")->Resize(CinnShape({10, 16}));
auto& varmap = compiled_object.paddle2cinn_varmap;
varmap = {
{"var1", "cinn_var1"}, {"var3", "cinn_var3"}, {"var4", "cinn_var4"}};
});
return compiled_object;
}
auto cinn_names = MapPaddleVariablesToCinn({"var1", "var3"}, varmap);
ASSERT_EQ(cinn_names.size(), 2);
EXPECT_EQ(cinn_names, std::vector<std::string>({"cinn_var1", "cinn_var3"}));
ASSERT_THROW(MapPaddleVariablesToCinn({"var1", "not_exist"}, varmap),
paddle::platform::EnforceNotMet);
// "var1" maps to cinn_var1, which exists in the fixture scope -> used;
// "var4" maps to cinn_var4, which is absent from the scope -> not used.
TEST(CinnLaunchContextTest, TestIsVariableUsed) {
auto launch_context =
std::make_unique<CinnLaunchContext>(GetDefaultCompiledObj());
ASSERT_EQ(launch_context->IsVariableUsed("var1"), true);
ASSERT_EQ(launch_context->IsVariableUsed("var4"), false);
}
TEST(CinnLaunchOpHelperTest, TestGetCinnTensorsFromCompiledScope) {
CinnScope cinn_scope;
cinn_scope.Var<CinnTensor>("cinn_var1");
cinn_scope.Var<CinnTensor>("cinn_var2");
cinn_scope.Var<CinnTensor>("cinn_var3");
auto cinn_tensors =
GetCinnTensorsFromCompiledScope({"cinn_var1", "cinn_var3"}, cinn_scope);
ASSERT_EQ(cinn_tensors.size(), 2);
ASSERT_EQ(cinn_tensors.front().get(),
cinn_scope.GetTensor("cinn_var1").get());
ASSERT_EQ(cinn_tensors.back().get(), cinn_scope.GetTensor("cinn_var3").get());
ASSERT_THROW(
GetCinnTensorsFromCompiledScope({"cinn_var1", "not_exist"}, cinn_scope),
paddle::platform::EnforceNotMet);
// With no arguments assigned yet, the fixture's cinn_var2 — the only scope
// variable not reachable through the paddle->cinn map — is still reported
// among the unassigned names.
TEST(CinnLaunchContextTest, TestGetInternalVariableNames) {
auto launch_context =
std::make_unique<CinnLaunchContext>(GetDefaultCompiledObj());
auto internal_variable_names = launch_context->GetInternalVariableNames();
ASSERT_EQ(internal_variable_names.size(), 1);
EXPECT_EQ(internal_variable_names.front(), "cinn_var2");
}
TEST(CinnLaunchOpHelperTest, TestCheckTensorEquivalent) {
TEST(CinnLaunchContextTest, TestMutableTensorData) {
platform::CPUPlace place;
framework::Scope scope;
auto* tensor1 = scope.Var("var1")->GetMutable<LoDTensor>();
tensor1->mutable_data<float>(framework::make_ddim({5, 8}), place);
auto* tensor2 = scope.Var("var2")->GetMutable<LoDTensor>();
CinnScope cinn_scope;
cinn_scope.Var<CinnTensor>("cinn_var1");
auto cinn_tensor1 = cinn_scope.GetTensor("cinn_var1");
cinn_tensor1->Resize(CinnShape({5, 8}));
cinn_tensor1->set_type(::cinn::common::type_of<float>());
ASSERT_NO_THROW(CheckTensorEquivalent("var1", tensor1, cinn_tensor1));
auto tensor2 = scope.Var("var2")->GetMutable<LoDTensor>();
ASSERT_THROW(CheckTensorEquivalent("var2", tensor2, cinn_tensor1),
auto launch_context =
std::make_unique<CinnLaunchContext>(GetDefaultCompiledObj());
// mutable_data on external variable
ASSERT_NO_THROW(launch_context->MutableTensorData("var1", place, tensor1));
ASSERT_TRUE(tensor1->IsInitialized());
ASSERT_EQ(tensor1->dims(), framework::make_ddim({3, 4}));
ASSERT_THROW(launch_context->MutableTensorData("not_exist", place, tensor1),
paddle::platform::EnforceNotMet);
cinn_tensor1->Resize(CinnShape({5, 7}));
ASSERT_THROW(CheckTensorEquivalent("var1", tensor1, cinn_tensor1),
paddle::platform::EnforceNotMet);
// mutable_data on internal variable
ASSERT_NO_THROW(
launch_context->MutableTensorData("cinn_var2", place, tensor2, true));
ASSERT_TRUE(tensor2->IsInitialized());
ASSERT_EQ(tensor2->dims(), framework::make_ddim({6, 7, 8}));
}
TEST(CinnLaunchOpHelperTest, TestTensorMutableDataWithCinnInfo) {
TEST(CinnLaunchContextTest, TestCheckTensorEquivalent) {
auto launch_context =
std::make_unique<CinnLaunchContext>(GetDefaultCompiledObj());
platform::CPUPlace place;
framework::Scope scope;
auto* tensor1 = scope.Var("var1")->GetMutable<LoDTensor>();
CinnScope cinn_scope;
cinn_scope.Var<CinnTensor>("cinn_var1");
auto cinn_tensor1 = cinn_scope.GetTensor("cinn_var1");
cinn_tensor1->Resize(CinnShape({5, 8}));
ASSERT_NO_THROW(TensorMutableDataWithCinnInfo(place, cinn_tensor1, tensor1));
ASSERT_TRUE(tensor1->IsInitialized());
ASSERT_EQ(tensor1->dims(), framework::make_ddim({5, 8}));
// CheckTensorEquivalent: tensor is not initialized
ASSERT_THROW(launch_context->AssignExternalVariable("var1", tensor1),
paddle::platform::EnforceNotMet);
// CheckTensorEquivalent: tensor dimension not equivalent
tensor1->mutable_data<float>(framework::make_ddim({3, 5}), place);
ASSERT_THROW(launch_context->AssignExternalVariable("var1", tensor1),
paddle::platform::EnforceNotMet);
}
TEST(CinnLaunchOpHelperTest, TestSeperateTempVar) {
CinnScope cinn_scope;
cinn_scope.Var<CinnTensor>("cinn_var1");
cinn_scope.Var<CinnTensor>("cinn_var2");
cinn_scope.Var<CinnTensor>("cinn_var3");
cinn_scope.Var<CinnTensor>("cinn_var4");
auto temp_names =
SeperateTempVar(cinn_scope, {"cinn_var1", "cinn_var2"}, {"cinn_var4"});
ASSERT_EQ(temp_names.size(), 1);
EXPECT_EQ(temp_names.front(), "cinn_var3");
// AssignExternalVariable must reject both a Paddle variable whose mapped
// CINN name is missing from the scope ("var4") and a name with no map
// entry at all ("cinn_var4").
TEST(CinnLaunchContextTest, TestAssignVariablePreCondition) {
auto launch_context =
std::make_unique<CinnLaunchContext>(GetDefaultCompiledObj());
platform::CPUPlace place;
framework::Scope scope;
auto* tensor4 = scope.Var("var4")->GetMutable<LoDTensor>();
// not used
ASSERT_THROW(launch_context->AssignExternalVariable("var4", tensor4),
paddle::platform::EnforceNotMet);
// not found
ASSERT_THROW(launch_context->AssignExternalVariable("cinn_var4", tensor4),
paddle::platform::EnforceNotMet);
}
TEST(CinnLaunchOpHelperTest, TestShareTensorWithCinnBuffer) {
TEST(CinnLaunchContextTest, TestSetArgument) {
auto launch_context =
std::make_unique<CinnLaunchContext>(GetDefaultCompiledObj());
platform::CPUPlace place;
framework::Scope scope;
auto* tensor1 = scope.Var("var1")->GetMutable<LoDTensor>();
tensor1->mutable_data<float>(framework::make_ddim({5, 6}), place);
tensor1->mutable_data<float>(framework::make_ddim({3, 4}), place);
auto* data1 = tensor1->data<float>();
data1[0] = 9.99f;
data1[10] = 19.99f;
auto cinn_buffer = ShareTensorWithCinnBuffer(tensor1);
// assign external variable
ASSERT_NO_THROW(launch_context->AssignExternalVariable("var1", tensor1));
auto* tensor2 = scope.Var("var2")->GetMutable<LoDTensor>();
tensor2->mutable_data<float>(framework::make_ddim({6, 7, 8}), place);
ASSERT_NO_THROW(launch_context->AssignInternalVariable("cinn_var2", tensor2));
// FinalizeArguments not missed check
ASSERT_THROW(launch_context->FinalizeArguments(),
paddle::platform::EnforceNotMet);
auto* tensor3 = scope.Var("var3")->GetMutable<LoDTensor>();
tensor3->mutable_data<float>(framework::make_ddim({10, 16}), place);
ASSERT_NO_THROW(launch_context->AssignExternalVariable("var3", tensor3));
auto name2argument = launch_context->FinalizeArguments();
ASSERT_EQ(name2argument.size(), 3);
ASSERT_EQ(name2argument.count("cinn_var1"), 1);
// check ShareTensorWithCinnBuffer
auto* cinn_buffer =
static_cast<cinn_buffer_t*>(name2argument.at("cinn_var1"));
ASSERT_NE(cinn_buffer->memory, nullptr);
ASSERT_EQ(cinn_buffer->num_elements(), 30);
ASSERT_EQ(cinn_buffer->num_elements(), 12);
auto* shadow_data = reinterpret_cast<float*>(cinn_buffer->memory);
EXPECT_FLOAT_EQ(shadow_data[0], 9.99f);
EXPECT_FLOAT_EQ(shadow_data[10], 19.99f);
}
TEST(CinnLaunchOpHelperTest, TestCheckArgumentsNotMissed) {
CinnScope cinn_scope;
cinn_scope.Var<CinnTensor>("cinn_var1");
cinn_scope.Var<CinnTensor>("cinn_var2");
std::map<std::string, cinn_pod_value_t> name2argument(
{{"cinn_var1", cinn_pod_value_t()}, {"cinn_var2", cinn_pod_value_t()}});
ASSERT_NO_THROW(CheckArgumentsNotMissed(cinn_scope, name2argument));
name2argument.erase("cinn_var2");
ASSERT_THROW(CheckArgumentsNotMissed(cinn_scope, name2argument),
paddle::platform::EnforceNotMet);
}
} // namespace details
} // namespace operators
} // namespace paddle
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册