Unverified commit 87054fe3, authored by HongyuJia, committed by GitHub

[Fix Bug] Fix random fail of CustomOp due to python lock (#54772)

Parent 46c57674
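Background on the fix: the old code created an `eager_gil_scoped_release` guard around the entire argument-parsing section, so `PyTuple_GET_ITEM` and the `CastPyArg2*` helpers ran with the GIL released. CPython objects may only be touched while the GIL is held; with it released, another Python thread can mutate or free the argument tuple mid-parse, which surfaces as intermittent rather than deterministic failures. The commit parses everything up front with the GIL held and narrows the guard to the Python-free tail of the function. Below is a minimal sketch of the two shapes, assuming a guard like `eager_gil_scoped_release` built on `PyEval_SaveThread`/`PyEval_RestoreThread`; `ScopedGILRelease` and `parse_then_release` are illustrative names, not Paddle API, and error handling is elided for brevity.

```cpp
#include <Python.h>

// Illustrative stand-in for eager_gil_scoped_release: release the GIL on
// construction, reacquire it on destruction (PyEval_SaveThread pairs with
// PyEval_RestoreThread).
class ScopedGILRelease {
 public:
  ScopedGILRelease() : tstate_(PyEval_SaveThread()) {}
  ~ScopedGILRelease() { PyEval_RestoreThread(tstate_); }

 private:
  PyThreadState* tstate_;
};

long parse_then_release(PyObject* args) {
  // BUG (old shape): touching CPython objects with the GIL released races
  // with every other Python thread, so failures appear randomly.
  //
  //   ScopedGILRelease guard;
  //   PyObject* item = PyTuple_GET_ITEM(args, 0);  // undefined behavior
  //
  // FIX (new shape): convert PyObjects to plain C++ values while the GIL
  // is still held.
  long value = PyLong_AsLong(PyTuple_GET_ITEM(args, 0));  // GIL held: safe

  {
    // Only the Python-free work runs with the GIL released, letting other
    // Python threads make progress during heavy C++ computation.
    ScopedGILRelease guard;
    value *= 2;  // placeholder for the real C++-only work
  }
  return value;
}
```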
```diff
@@ -521,87 +521,84 @@ static PyObject* eager_api_run_custom_op(PyObject* self,
   std::string op_type = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 0), 0);
   VLOG(7) << "Get things from python for Custom Op: " << op_type;
   paddle::CustomOpKernelContext ctx;
-  {
-    eager_gil_scoped_release guard;
-    auto meta_info_map = egr::Controller::Instance().GetOpMetaInfoMap();
-    PADDLE_ENFORCE_NE(
-        meta_info_map.find(op_type),
-        meta_info_map.end(),
-        paddle::platform::errors::NotFound(
-            "Can't find %s in Eager OpMetaInfoMap which should be "
-            "created by LoadOpMetaInfoAndRegisterOp, please make "
-            "sure you registered your op first and try again. ",
-            op_type));
-    const auto& vec_map = meta_info_map.at(op_type);
-    const auto& inputs = paddle::OpMetaInfoHelper::GetInputs(vec_map[0]);
-    const auto& attrs = paddle::OpMetaInfoHelper::GetAttrs(vec_map[0]);
-    const auto& outputs = paddle::OpMetaInfoHelper::GetOutputs(vec_map[0]);
-    const auto& inplace_map =
-        paddle::OpMetaInfoHelper::GetInplaceMap(vec_map[0]);
-    for (size_t i = 0; i < inputs.size(); ++i) {
-      const auto& input = inputs.at(i);
-      // Parse op_type first, so that use i + 1
-      PyObject* obj = PyTuple_GET_ITEM(args, i + 1);
-      // Emplace Py_None from python, this means optional inputs passed to C++,
-      // use one un-initialized tensor to indicate both Tensor and
-      // vector<Tensor> inputs.
-      if (obj == Py_None) {
-        VLOG(7) << "Custom operator add input " << input
-                << " to CustomOpKernelContext. Add un-initialized tensor "
-                   "because the optional input is None";
-        ctx.EmplaceBackInput(std::move(paddle::Tensor()));
-        continue;
-      }
-      if (paddle::framework::detail::IsDuplicableVar(input)) {
-        ctx.EmplaceBackInputs(std::move(CastPyArg2VectorOfTensor(obj, i + 1)));
-        VLOG(7) << "Custom operator add input " << input
-                << " to CustomOpKernelContext. Add vector<Tensor> size = "
-                << ctx.InputRangeAt(i).second - ctx.InputRangeAt(i).first;
-      } else {
-        ctx.EmplaceBackInput(std::move(CastPyArg2Tensor(obj, i + 1)));
-        VLOG(7) << "Custom operator add input " << input
-                << " to CustomOpKernelContext. Add Tensor for general case.";
-      }
-    }
-    // Parse op_type and inputs first, so that use 1 + inputs.size() + i
-    int attr_start_idx = 1 + inputs.size();
-    for (size_t i = 0; i < attrs.size(); ++i) {
-      const auto& attr = attrs.at(i);
-      std::vector<std::string> attr_name_and_type = paddle::ParseAttrStr(attr);
-      auto attr_type_str = attr_name_and_type[1];
-      VLOG(7) << "Custom operator add attrs " << attr_name_and_type[0]
-              << " to CustomOpKernelContext. Attribute type = "
-              << attr_type_str;
-      PyObject* obj = PyTuple_GET_ITEM(args, attr_start_idx + i);
-      if (attr_type_str == "bool") {
-        ctx.EmplaceBackAttr(CastPyArg2AttrBoolean(obj, attr_start_idx + i));
-      } else if (attr_type_str == "int") {
-        ctx.EmplaceBackAttr(CastPyArg2AttrInt(obj, attr_start_idx + i));
-      } else if (attr_type_str == "float") {
-        ctx.EmplaceBackAttr(CastPyArg2AttrFloat(obj, attr_start_idx + i));
-      } else if (attr_type_str == "int64_t") {
-        ctx.EmplaceBackAttr(CastPyArg2Long(obj, op_type, attr_start_idx + i));
-      } else if (attr_type_str == "std::string") {
-        ctx.EmplaceBackAttr(CastPyArg2AttrString(obj, attr_start_idx + i));
-      } else if (attr_type_str == "std::vector<int>") {
-        ctx.EmplaceBackAttr(CastPyArg2VectorOfInt(obj, attr_start_idx + i));
-      } else if (attr_type_str == "std::vector<float>") {
-        ctx.EmplaceBackAttr(CastPyArg2VectorOfFloat(obj, attr_start_idx + i));
-      } else if (attr_type_str == "std::vector<int64_t>") {
-        ctx.EmplaceBackAttr(CastPyArg2Longs(obj, op_type, attr_start_idx + i));
-      } else if (attr_type_str == "std::vector<std::string>") {
-        ctx.EmplaceBackAttr(CastPyArg2VectorOfString(obj, attr_start_idx + i));
-      } else {
-        PADDLE_THROW(platform::errors::Unimplemented(
-            "Unsupported `%s` type value as custom attribute now. "
-            "Supported data types include `bool`, `int`, `float`, "
-            "`int64_t`, `std::string`, `std::vector<int>`, "
-            "`std::vector<float>`, `std::vector<int64_t>`, "
-            "`std::vector<std::string>`, Please check whether "
-            "the attribute data type and data type string are matched.",
-            attr_type_str));
-      }
-    }
+  auto meta_info_map = egr::Controller::Instance().GetOpMetaInfoMap();
+  PADDLE_ENFORCE_NE(meta_info_map.find(op_type),
+                    meta_info_map.end(),
+                    paddle::platform::errors::NotFound(
+                        "Can't find %s in Eager OpMetaInfoMap which should be "
+                        "created by LoadOpMetaInfoAndRegisterOp, please make "
+                        "sure you registered your op first and try again. ",
+                        op_type));
+  const auto& vec_map = meta_info_map.at(op_type);
+  const auto& inputs = paddle::OpMetaInfoHelper::GetInputs(vec_map[0]);
+  const auto& attrs = paddle::OpMetaInfoHelper::GetAttrs(vec_map[0]);
+  const auto& outputs = paddle::OpMetaInfoHelper::GetOutputs(vec_map[0]);
+  const auto& inplace_map = paddle::OpMetaInfoHelper::GetInplaceMap(vec_map[0]);
+  for (size_t i = 0; i < inputs.size(); ++i) {
+    const auto& input = inputs.at(i);
+    // Parse op_type first, so that use i + 1
+    PyObject* obj = PyTuple_GET_ITEM(args, i + 1);
+    // Emplace Py_None from python, this means optional inputs passed to C++,
+    // use one un-initialized tensor to indicate both Tensor and
+    // vector<Tensor> inputs.
+    if (obj == Py_None) {
+      VLOG(7) << "Custom operator add input " << input
+              << " to CustomOpKernelContext. Add un-initialized tensor "
+                 "because the optional input is None";
+      ctx.EmplaceBackInput(std::move(paddle::Tensor()));
+      continue;
+    }
+    if (paddle::framework::detail::IsDuplicableVar(input)) {
+      ctx.EmplaceBackInputs(std::move(CastPyArg2VectorOfTensor(obj, i + 1)));
+      VLOG(7) << "Custom operator add input " << input
+              << " to CustomOpKernelContext. Add vector<Tensor> size = "
+              << ctx.InputRangeAt(i).second - ctx.InputRangeAt(i).first;
+    } else {
+      ctx.EmplaceBackInput(std::move(CastPyArg2Tensor(obj, i + 1)));
+      VLOG(7) << "Custom operator add input " << input
+              << " to CustomOpKernelContext. Add Tensor for general case.";
+    }
+  }
+  // Parse op_type and inputs first, so that use 1 + inputs.size() + i
+  int attr_start_idx = 1 + inputs.size();
+  for (size_t i = 0; i < attrs.size(); ++i) {
+    const auto& attr = attrs.at(i);
+    std::vector<std::string> attr_name_and_type = paddle::ParseAttrStr(attr);
+    auto attr_type_str = attr_name_and_type[1];
+    VLOG(7) << "Custom operator add attrs " << attr_name_and_type[0]
+            << " to CustomOpKernelContext. Attribute type = " << attr_type_str;
+    PyObject* obj = PyTuple_GET_ITEM(args, attr_start_idx + i);
+    if (attr_type_str == "bool") {
+      ctx.EmplaceBackAttr(CastPyArg2AttrBoolean(obj, attr_start_idx + i));
+    } else if (attr_type_str == "int") {
+      ctx.EmplaceBackAttr(CastPyArg2AttrInt(obj, attr_start_idx + i));
+    } else if (attr_type_str == "float") {
+      ctx.EmplaceBackAttr(CastPyArg2AttrFloat(obj, attr_start_idx + i));
+    } else if (attr_type_str == "int64_t") {
+      ctx.EmplaceBackAttr(CastPyArg2Long(obj, op_type, attr_start_idx + i));
+    } else if (attr_type_str == "std::string") {
+      ctx.EmplaceBackAttr(CastPyArg2AttrString(obj, attr_start_idx + i));
+    } else if (attr_type_str == "std::vector<int>") {
+      ctx.EmplaceBackAttr(CastPyArg2VectorOfInt(obj, attr_start_idx + i));
+    } else if (attr_type_str == "std::vector<float>") {
+      ctx.EmplaceBackAttr(CastPyArg2VectorOfFloat(obj, attr_start_idx + i));
+    } else if (attr_type_str == "std::vector<int64_t>") {
+      ctx.EmplaceBackAttr(CastPyArg2Longs(obj, op_type, attr_start_idx + i));
+    } else if (attr_type_str == "std::vector<std::string>") {
+      ctx.EmplaceBackAttr(CastPyArg2VectorOfString(obj, attr_start_idx + i));
+    } else {
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Unsupported `%s` type value as custom attribute now. "
+          "Supported data types include `bool`, `int`, `float`, "
+          "`int64_t`, `std::string`, `std::vector<int>`, "
+          "`std::vector<float>`, `std::vector<int64_t>`, "
+          "`std::vector<std::string>`, Please check whether "
+          "the attribute data type and data type string are matched.",
+          attr_type_str));
+    }
+  }
+  {
+    eager_gil_scoped_release guard;
     ctx.ConstructInplaceIndex(inputs, outputs, inplace_map);
     const auto& inplace_reverse_idx_map = ctx.GetInplaceReverseIndexMap();
     for (size_t out_idx = 0; out_idx < outputs.size(); ++out_idx) {
...
```
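Note that the guard is narrowed rather than removed: everything from `ctx.ConstructInplaceIndex` onward (including, past this truncated hunk, the kernel launch) is pure C++, so releasing the GIL there still lets other Python threads make progress while the custom op executes, without racing the parsing above. Below is a condensed, self-contained sketch of the resulting two-phase control flow; `GILReleaseGuard`, `ParsedContext`, `RunCppOnlyWork`, and `run_custom_op_shape` are hypothetical stand-ins, not Paddle code.

```cpp
#include <Python.h>

#include <vector>

// Hypothetical stand-in mirroring eager_gil_scoped_release.
struct GILReleaseGuard {
  PyThreadState* tstate{PyEval_SaveThread()};
  ~GILReleaseGuard() { PyEval_RestoreThread(tstate); }
};

// Plain C++ values extracted from PyObjects, playing the role of
// CustomOpKernelContext in this sketch.
struct ParsedContext {
  std::vector<long> attrs;
};

static void RunCppOnlyWork(ParsedContext* ctx) {
  // Placeholder for ConstructInplaceIndex, output wiring, kernel launch.
  for (auto& a : ctx->attrs) a += 1;
}

static PyObject* run_custom_op_shape(PyObject* /*self*/, PyObject* args) {
  ParsedContext ctx;
  // Phase 1, GIL held: every PyObject is read and converted up front
  // (conversion error handling elided for brevity).
  const Py_ssize_t n = PyTuple_GET_SIZE(args);
  for (Py_ssize_t i = 0; i < n; ++i) {
    ctx.attrs.push_back(PyLong_AsLong(PyTuple_GET_ITEM(args, i)));
  }
  // Phase 2, GIL released: no Py* calls are allowed inside this scope.
  {
    GILReleaseGuard guard;
    RunCppOnlyWork(&ctx);
  }
  Py_RETURN_NONE;  // placeholder; the real function returns the op outputs
}
```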