Unverified · Commit 4b085c57 · Authored by Ruibiao Chen · Committed by GitHub

Rename BOOST_GET macros (#44368)

* Rename BOOST_GET macros

* Fix conflicts
Parent d4bb2ad7
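Background: the BOOST_GET family (BOOST_GET, BOOST_GET_CONST) is Paddle's checked accessor for the variant-backed framework::Attribute type; this commit only renames those macros to PADDLE_GET / PADDLE_GET_CONST, leaving call sites and semantics otherwise unchanged. The snippet below is a minimal, self-contained sketch of the usage pattern being renamed; the simplified Attribute alias and the GetConstAttr helper are illustrative stand-ins built on std::variant, not Paddle's actual macro implementation.

// Illustrative analogue of the PADDLE_GET_CONST call pattern (assumption:
// a std::variant-based stand-in; the real macros live inside Paddle and
// wrap variant access with richer error reporting).
#include <cstdint>
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <variant>
#include <vector>

// Simplified stand-in for paddle::framework::Attribute.
using Attribute =
    std::variant<int, float, bool, int64_t, std::string, std::vector<int>>;
using AttributeMap = std::map<std::string, Attribute>;

// Typed read with a clear error, mirroring what PADDLE_GET_CONST provides.
template <typename T>
const T& GetConstAttr(const Attribute& attr) {
  if (!std::holds_alternative<T>(attr)) {
    throw std::runtime_error("attribute holds a different type");
  }
  return std::get<T>(attr);
}

int main() {
  AttributeMap attrs{{"col", 3}, {"pre_layer_norm", false}};

  // Mirrors `int idx = PADDLE_GET_CONST(int, op->GetAttr("col"));`
  int idx = GetConstAttr<int>(attrs.at("col"));

  // Mirrors the `if (attrs.count(...))` guard seen throughout the diff.
  bool pre_layer_norm = false;
  if (attrs.count("pre_layer_norm")) {
    pre_layer_norm = GetConstAttr<bool>(attrs.at("pre_layer_norm"));
  }

  std::cout << idx << " " << pre_layer_norm << "\n";
  return 0;
}

The diff below then simply swaps the macro name at every call site.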
@@ -416,7 +416,7 @@ bool DistModel::PrepareFeedAndFetch() {
   for (auto *op : program_->Block(0).AllOps()) {
     if (op->Type() == "feed") {
       VLOG(3) << "feed op with feed var: " << op->Output("Out")[0];
-      int idx = BOOST_GET_CONST(int, op->GetAttr("col"));
+      int idx = PADDLE_GET_CONST(int, op->GetAttr("col"));
       if (feeds_.size() <= static_cast<size_t>(idx)) {
         feeds_.resize(idx + 1);
       }
@@ -446,7 +446,7 @@ bool DistModel::PrepareFeedAndFetch() {
       }
     } else if (op->Type() == "fetch") {
       VLOG(3) << "fetch op with fetch var: " << op->Input("X")[0];
-      int idx = BOOST_GET_CONST(int, op->GetAttr("col"));
+      int idx = PADDLE_GET_CONST(int, op->GetAttr("col"));
       if (fetches_.size() <= static_cast<size_t>(idx)) {
         fetches_.resize(idx + 1);
       }
@@ -507,7 +507,7 @@ bool DistModel::FetchResults(std::vector<DistModelTensor> *output_data,
   VLOG(3) << "DistModel is fetch results.";
   output_data->resize(fetches_.size());
   for (size_t i = 0; i < fetches_.size(); ++i) {
-    int idx = BOOST_GET_CONST(int, fetches_[i]->GetAttr("col"));
+    int idx = PADDLE_GET_CONST(int, fetches_[i]->GetAttr("col"));
     VLOG(3) << "Fetching data for [" << idx_to_fetches_[idx] << "]";
     PADDLE_ENFORCE_EQ(
         static_cast<size_t>(idx),
@@ -518,7 +518,7 @@ bool DistModel::FetchResults(std::vector<DistModelTensor> *output_data,
             i));
     framework::FetchType &fetch_var =
         framework::GetFetchVariable(*scope, "fetch", idx);
-    auto &fetch = BOOST_GET(framework::LoDTensor, fetch_var);
+    auto &fetch = PADDLE_GET(framework::LoDTensor, fetch_var);
    auto type = framework::TransToProtoVarType(fetch.dtype());
    auto output = &(output_data->at(i));
    output->name = idx_to_fetches_[idx];
...
@@ -398,7 +398,7 @@ fused_attention_dygraph_function(
   bool pre_layer_norm = false;
   if (attrs.count("pre_layer_norm")) {
-    pre_layer_norm = BOOST_GET_CONST(bool, attrs.at("pre_layer_norm"));
+    pre_layer_norm = PADDLE_GET_CONST(bool, attrs.at("pre_layer_norm"));
   }
   // Set Attributes
...
@@ -318,7 +318,7 @@ fused_feedforward_dygraph_function(
   bool pre_layer_norm = false;
   if (attrs.count("pre_layer_norm")) {
-    pre_layer_norm = BOOST_GET_CONST(bool, attrs.at("pre_layer_norm"));
+    pre_layer_norm = PADDLE_GET_CONST(bool, attrs.at("pre_layer_norm"));
   }
   // Set Attributes
...
@@ -303,12 +303,12 @@ fused_gate_attention_dygraph_function(
   bool merge_qkv = true;
   if (attrs.count("merge_qkv")) {
-    merge_qkv = BOOST_GET_CONST(bool, attrs.at("merge_qkv"));
+    merge_qkv = PADDLE_GET_CONST(bool, attrs.at("merge_qkv"));
   }
   bool has_gating = true;
   if (attrs.count("has_gating")) {
-    has_gating = BOOST_GET_CONST(bool, attrs.at("has_gating"));
+    has_gating = PADDLE_GET_CONST(bool, attrs.at("has_gating"));
   }
   // Set Attributes
...
@@ -38,7 +38,7 @@ fused_attentionGradNodeCompat::operator()(
   bool pre_layer_norm = false;
   if (attr_map_.count("pre_layer_norm")) {
-    pre_layer_norm = BOOST_GET_CONST(bool, attr_map_.at("pre_layer_norm"));
+    pre_layer_norm = PADDLE_GET_CONST(bool, attr_map_.at("pre_layer_norm"));
   }
   std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> ins0 =
...
@@ -40,7 +40,7 @@ fused_feedforwardGradNodeCompat::operator()(
   bool pre_layer_norm = false;
   if (attr_map_.count("pre_layer_norm")) {
-    pre_layer_norm = BOOST_GET_CONST(bool, attr_map_.at("pre_layer_norm"));
+    pre_layer_norm = PADDLE_GET_CONST(bool, attr_map_.at("pre_layer_norm"));
   }
   std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> ins0 =
...
@@ -40,12 +40,12 @@ fused_gate_attentionGradNodeCompat::operator()(
   bool merge_qkv = true;
   if (attr_map_.count("merge_qkv")) {
-    merge_qkv = BOOST_GET_CONST(bool, attr_map_.at("merge_qkv"));
+    merge_qkv = PADDLE_GET_CONST(bool, attr_map_.at("merge_qkv"));
   }
   bool has_gating = true;
   if (attr_map_.count("has_gating")) {
-    has_gating = BOOST_GET_CONST(bool, attr_map_.at("has_gating"));
+    has_gating = PADDLE_GET_CONST(bool, attr_map_.at("has_gating"));
   }
   std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> ins0 =
...
@@ -352,7 +352,7 @@ static typename std::enable_if<IsVector, std::string>::type GetAttrValue(
     const framework::Attribute& attr) {
   std::string val = "";
   val += "{";
-  for (auto x : BOOST_GET_CONST(std::vector<T>, attr)) {
+  for (auto x : PADDLE_GET_CONST(std::vector<T>, attr)) {
     val += std::to_string(x) + ",";
   }
   if (val.size() > 1) val.pop_back();
@@ -363,7 +363,7 @@ static typename std::enable_if<IsVector, std::string>::type GetAttrValue(
 template <typename T, bool IsVector>
 static typename std::enable_if<!IsVector, std::string>::type GetAttrValue(
     const framework::Attribute& attr) {
-  return std::to_string(BOOST_GET_CONST(T, attr));
+  return std::to_string(PADDLE_GET_CONST(T, attr));
 }
 static std::pair<std::string, std::string> GetAttrType(
@@ -385,7 +385,7 @@ static std::pair<std::string, std::string> GetAttrType(
     case (3): {
       ret = "std::string";
       if (is_arg) ret += "&";
-      val = "\"" + BOOST_GET_CONST(std::string, attr) + "\"";
+      val = "\"" + PADDLE_GET_CONST(std::string, attr) + "\"";
       break;
     }
     case (4): {
@@ -404,7 +404,7 @@ static std::pair<std::string, std::string> GetAttrType(
       ret = "std::vector<std::string>";
       if (is_arg) ret += "&";
       val += "{";
-      for (auto x : BOOST_GET_CONST(std::vector<std::string>, attr)) {
+      for (auto x : PADDLE_GET_CONST(std::vector<std::string>, attr)) {
        val += "\"" + x + "\"" + ",";
       }
       if (val.size() > 1) val.pop_back();
...
@@ -191,16 +191,16 @@ inline void RunProgramAPI(
     std::vector<paddle::experimental::Tensor *> &dout,  // NOLINT
     const paddle::framework::AttributeMap &attrs) {
   VLOG(2) << "RunProgramOpKernel Compute";
-  auto start_op_index = BOOST_GET_CONST(int64_t, attrs.at("start_op_index"));
+  auto start_op_index = PADDLE_GET_CONST(int64_t, attrs.at("start_op_index"));
-  auto end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index"));
+  auto end_op_index = PADDLE_GET_CONST(int64_t, attrs.at("end_op_index"));
   // In the original run_program OP, the default value of the is_test
   // attribute is false, we should check if there is is_test parameter
   // in attrs
   auto is_test = false;
   if (attrs.count("is_test")) {
-    is_test = BOOST_GET_CONST(bool, attrs.at("is_test"));
+    is_test = PADDLE_GET_CONST(bool, attrs.at("is_test"));
   }
-  auto program_id = BOOST_GET_CONST(int64_t, attrs.at("program_id"));
+  auto program_id = PADDLE_GET_CONST(int64_t, attrs.at("program_id"));
   // NOTE(chenweihang): In order not to add new variable type, use vector
   // here. Originally, here can use scope directly.
@@ -226,8 +226,8 @@ inline void RunProgramAPI(
   details::ShareTensorsIntoScope(x, &scope);
   details::ShareTensorsIntoScope(params, &scope);
-  auto *global_block =
-      BOOST_GET_CONST(paddle::framework::BlockDesc *, attrs.at("global_block"));
+  auto *global_block = PADDLE_GET_CONST(paddle::framework::BlockDesc *,
+                                        attrs.at("global_block"));
   const auto &place = egr::Controller::Instance().GetExpectedPlace();
   if (end_op_index > start_op_index) {
@@ -292,11 +292,11 @@ inline void RunProgramGradAPI(
   // if all output vars are set to stop_gradient, grad op no need to executed
   if (x_grad.empty() && params_grad.empty()) return;
-  auto *global_block =
-      BOOST_GET_CONST(paddle::framework::BlockDesc *, attrs.at("global_block"));
+  auto *global_block = PADDLE_GET_CONST(paddle::framework::BlockDesc *,
+                                        attrs.at("global_block"));
-  auto orig_end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index"));
+  auto orig_end_op_index = PADDLE_GET_CONST(int64_t, attrs.at("end_op_index"));
-  auto program_id = BOOST_GET_CONST(int64_t, attrs.at("program_id"));
+  auto program_id = PADDLE_GET_CONST(int64_t, attrs.at("program_id"));
   // NOTE: skip `shape` and `fill_constant` op created by
   // fluid.backward.gradients, one forward output will generate one `shape`
   // and `fill_constant`
...
@@ -21,31 +21,31 @@ namespace framework {
 paddle::any GetAttrValue(const Attribute& attr) {
   switch (AttrTypeID(attr)) {
     case proto::AttrType::INT:
-      return BOOST_GET_CONST(int, attr);
+      return PADDLE_GET_CONST(int, attr);
     case proto::AttrType::FLOAT:
-      return BOOST_GET_CONST(float, attr);
+      return PADDLE_GET_CONST(float, attr);
     case proto::AttrType::STRING:
-      return BOOST_GET_CONST(std::string, attr);
+      return PADDLE_GET_CONST(std::string, attr);
     case proto::AttrType::INTS:
-      return BOOST_GET_CONST(std::vector<int>, attr);
+      return PADDLE_GET_CONST(std::vector<int>, attr);
     case proto::AttrType::FLOATS:
-      return BOOST_GET_CONST(std::vector<float>, attr);
+      return PADDLE_GET_CONST(std::vector<float>, attr);
     case proto::AttrType::STRINGS:
-      return BOOST_GET_CONST(std::vector<std::string>, attr);
+      return PADDLE_GET_CONST(std::vector<std::string>, attr);
     case proto::AttrType::BOOLEAN:
-      return BOOST_GET_CONST(bool, attr);
+      return PADDLE_GET_CONST(bool, attr);
     case proto::AttrType::BOOLEANS:
-      return BOOST_GET_CONST(std::vector<bool>, attr);
+      return PADDLE_GET_CONST(std::vector<bool>, attr);
     case proto::AttrType::LONG:
-      return BOOST_GET_CONST(int64_t, attr);
+      return PADDLE_GET_CONST(int64_t, attr);
     case proto::AttrType::LONGS:
-      return BOOST_GET_CONST(std::vector<int64_t>, attr);
+      return PADDLE_GET_CONST(std::vector<int64_t>, attr);
     case proto::AttrType::FLOAT64S:
-      return BOOST_GET_CONST(std::vector<double>, attr);
+      return PADDLE_GET_CONST(std::vector<double>, attr);
     case proto::AttrType::BLOCK:
-      return BOOST_GET_CONST(BlockDesc*, attr);
+      return PADDLE_GET_CONST(BlockDesc*, attr);
     case proto::AttrType::BLOCKS:
-      return BOOST_GET_CONST(std::vector<BlockDesc*>, attr);
+      return PADDLE_GET_CONST(std::vector<BlockDesc*>, attr);
     default:
       PADDLE_THROW(platform::errors::Unimplemented(
           "Unsupported Attribute value type `%s` for phi.",
...
@@ -72,10 +72,10 @@ struct ExtractAttribute<bool> {
   bool* operator()(Attribute& attr) const {
     if (attr.type() == typeid(int)) {  // NOLINT
-      int val = BOOST_GET_CONST(int, attr);
+      int val = PADDLE_GET_CONST(int, attr);
       attr = static_cast<bool>(val);
     } else if (attr.type() == typeid(float)) {  // NOLINT
-      float val = BOOST_GET_CONST(float, attr);
+      float val = PADDLE_GET_CONST(float, attr);
       attr = static_cast<bool>(val);
     }
     bool* attr_value = nullptr;
@@ -100,10 +100,10 @@ struct ExtractAttribute<int64_t> {
   int64_t* operator()(Attribute& attr) const {
     if (attr.type() == typeid(int)) {  // NOLINT
-      int val = BOOST_GET_CONST(int, attr);
+      int val = PADDLE_GET_CONST(int, attr);
       attr = static_cast<int64_t>(val);
     } else if (attr.type() == typeid(float)) {  // NOLINT
-      int val = BOOST_GET_CONST(float, attr);
+      int val = PADDLE_GET_CONST(float, attr);
       attr = static_cast<int64_t>(val);
     }
     int64_t* attr_value = nullptr;
@@ -128,11 +128,11 @@ struct ExtractAttribute<std::vector<int64_t>> {
   std::vector<int64_t>* operator()(Attribute& attr) const {
     if (attr.type() == typeid(std::vector<int>)) {  // NOLINT
-      std::vector<int> val = BOOST_GET_CONST(std::vector<int>, attr);
+      std::vector<int> val = PADDLE_GET_CONST(std::vector<int>, attr);
       std::vector<int64_t> vec(val.begin(), val.end());
       attr = vec;
     } else if (attr.type() == typeid(std::vector<float>)) {  // NOLINT
-      std::vector<float> val = BOOST_GET_CONST(std::vector<float>, attr);
+      std::vector<float> val = PADDLE_GET_CONST(std::vector<float>, attr);
       std::vector<int64_t> vec(val.begin(), val.end());
       attr = vec;
     }
@@ -159,10 +159,10 @@ struct ExtractAttribute<float> {
   float* operator()(Attribute& attr) const {
     if (attr.type() == typeid(int)) {  // NOLINT
-      int val = BOOST_GET_CONST(int, attr);
+      int val = PADDLE_GET_CONST(int, attr);
       attr = static_cast<float>(val);
     } else if (attr.type() == typeid(int64_t)) {  // NOLINT
-      int64_t val = BOOST_GET_CONST(int64_t, attr);
+      int64_t val = PADDLE_GET_CONST(int64_t, attr);
       attr = static_cast<float>(val);
     }
     float* attr_value = nullptr;
@@ -187,11 +187,11 @@ struct ExtractAttribute<std::vector<double>> {
   std::vector<double>* operator()(Attribute& attr) const {
     if (attr.type() == typeid(std::vector<int>)) {  // NOLINT
-      std::vector<int> val = BOOST_GET_CONST(std::vector<int>, attr);
+      std::vector<int> val = PADDLE_GET_CONST(std::vector<int>, attr);
       std::vector<double> vec(val.begin(), val.end());
       attr = vec;
     } else if (attr.type() == typeid(std::vector<float>)) {  // NOLINT
-      std::vector<float> val = BOOST_GET_CONST(std::vector<float>, attr);
+      std::vector<float> val = PADDLE_GET_CONST(std::vector<float>, attr);
       std::vector<double> vec(val.begin(), val.end());
       attr = vec;
     }
...
@@ -274,11 +274,12 @@ void BlockDesc::MoveFrom(BlockDesc *block) {
     const auto &attr_value = pair.second;
     auto attr_type = static_cast<proto::AttrType>(attr_value.index() - 1);
     if (attr_type == proto::AttrType::BLOCK) {
-      auto block_id = BOOST_GET_CONST(BlockDesc *, attr_value)->ID();
+      auto block_id = PADDLE_GET_CONST(BlockDesc *, attr_value)->ID();
       dst_op->SetBlockAttr(attr_name, prog_->MutableBlock(block_id));
       VLOG(10) << "Set block attr " << attr_name << " id " << block_id;
     } else if (attr_type == proto::AttrType::BLOCKS) {
-      auto old_blocks = BOOST_GET_CONST(std::vector<BlockDesc *>, attr_value);
+      auto old_blocks =
+          PADDLE_GET_CONST(std::vector<BlockDesc *>, attr_value);
       std::vector<BlockDesc *> new_blocks;
       new_blocks.reserve(old_blocks.size());
       for (auto *b : old_blocks) {
...
@@ -174,16 +174,16 @@ FetchResultType AsyncSSAGraphExecutor::Run(
   HandleException();
   FetchList ret;
-  auto &val = BOOST_GET(FetchList, fetch_data);
+  auto &val = PADDLE_GET(FetchList, fetch_data);
   for (size_t fetch_idx = 0; fetch_idx < fetch_tensors.size(); ++fetch_idx) {
     if (data_is_lod_tensor(val.at(fetch_idx))) {
       std::vector<const LoDTensor *> lodtensor_ptrs;
-      lodtensor_ptrs.push_back(&(BOOST_GET(LoDTensor, val.at(fetch_idx))));
+      lodtensor_ptrs.push_back(&(PADDLE_GET(LoDTensor, val.at(fetch_idx))));
       LoDTensor var;
       MergeLoDTensor(&var, lodtensor_ptrs, platform::CPUPlace());
       ret.emplace_back(var);
     } else {
-      auto array = BOOST_GET(LoDTensorArray, val.at(fetch_idx));
+      auto array = PADDLE_GET(LoDTensorArray, val.at(fetch_idx));
       LoDTensorArray item_array;
       item_array.reserve(array.size());
       for (size_t i = 0; i < array.size(); ++i) {
...
@@ -228,7 +228,7 @@ void FetchAsyncOpHandle::RunImpl() {
   }
   if (return_merged_) {
-    auto &val = BOOST_GET(FetchList, *data_);
+    auto &val = PADDLE_GET(FetchList, *data_);
     if (src_vars[0]->IsType<LoDTensor>()) {
       // to lodtensor type
       std::vector<const LoDTensor *> src_lodtensors;
@@ -263,7 +263,7 @@ void FetchAsyncOpHandle::RunImpl() {
       val.at(offset_) = std::move(dst_lodtensor_array);
     }
   } else {
-    auto &val = BOOST_GET(FetchUnmergedList, *data_);
+    auto &val = PADDLE_GET(FetchUnmergedList, *data_);
     auto &dst_tensors = val.at(offset_);
     dst_tensors.reserve(src_vars.size());
...
@@ -74,22 +74,22 @@ static void CheckDims(const framework::DDim &tensor_dims,
 void FetchOpHandle::WaitAndMergeCPUFetchVars() const {
   if (return_merged_) {
     if (data_is_lod_tensor(tensors_[0])) {
-      const auto &tensor_dims = BOOST_GET_CONST(LoDTensor, tensors_[0]).dims();
+      const auto &tensor_dims = PADDLE_GET_CONST(LoDTensor, tensors_[0]).dims();
       for (size_t i = 1; i < tensors_.size(); i++) {
-        const auto &ele_dims = BOOST_GET_CONST(LoDTensor, tensors_[i]).dims();
+        const auto &ele_dims = PADDLE_GET_CONST(LoDTensor, tensors_[i]).dims();
         CheckDims(tensor_dims, ele_dims, offset_);
       }
       std::vector<const LoDTensor *> tensors_ptr;
       tensors_ptr.reserve(tensors_.size());
       for (auto &t : tensors_) {
-        tensors_ptr.emplace_back(&BOOST_GET_CONST(LoDTensor, t));
+        tensors_ptr.emplace_back(&PADDLE_GET_CONST(LoDTensor, t));
       }
-      auto &val = BOOST_GET(FetchList, *data_);
+      auto &val = PADDLE_GET(FetchList, *data_);
       LoDTensor var;
       MergeLoDTensor(&var, tensors_ptr, platform::CPUPlace());
       val.at(offset_) = std::move(var);
     } else {
-      auto &array = BOOST_GET_CONST(LoDTensorArray, tensors_[0]);
+      auto &array = PADDLE_GET_CONST(LoDTensorArray, tensors_[0]);
       LoDTensorArray tmp_array;
       tmp_array.reserve(array.size());
       for (size_t i = 0; i < array.size(); ++i) {
@@ -98,7 +98,7 @@ void FetchOpHandle::WaitAndMergeCPUFetchVars() const {
         tensors_ptr.reserve(tensors_.size());
         tensors_ptr.push_back(&array[i]);
         for (size_t j = 1; j < tensors_.size(); ++j) {
-          auto &element = BOOST_GET_CONST(LoDTensorArray, tensors_[j]);
+          auto &element = PADDLE_GET_CONST(LoDTensorArray, tensors_[j]);
           const auto &ele_dims = element[i].dims();
           CheckDims(tensor_dims, ele_dims, offset_);
           tensors_ptr.push_back(&element[i]);
@@ -106,11 +106,11 @@ void FetchOpHandle::WaitAndMergeCPUFetchVars() const {
         tmp_array.emplace_back();
         MergeLoDTensor(&(tmp_array.back()), tensors_ptr, platform::CPUPlace());
       }
-      auto &val = BOOST_GET(FetchList, *data_);
+      auto &val = PADDLE_GET(FetchList, *data_);
       val.at(offset_) = std::move(tmp_array);
     }
   } else {
-    auto &val = BOOST_GET(FetchUnmergedList, *data_);
+    auto &val = PADDLE_GET(FetchUnmergedList, *data_);
     val.at(offset_) = std::move(tensors_);
   }
 }
@@ -151,13 +151,13 @@ void FetchOpHandle::RunImpl() {
     if (var->IsType<LoDTensor>()) {
       auto &t = var->Get<framework::LoDTensor>();
-      auto &item = BOOST_GET(LoDTensor, tensors_[i]);
+      auto &item = PADDLE_GET(LoDTensor, tensors_[i]);
       TransData(t, &item);
     } else {
       auto &t = var->Get<framework::LoDTensorArray>();
       LoDTensorArray tmp(t.size());
       tensors_[i] = tmp;
-      auto &item = BOOST_GET(LoDTensorArray, tensors_[i]);
+      auto &item = PADDLE_GET(LoDTensorArray, tensors_[i]);
       for (size_t j = 0; j < t.size(); ++j) {
         TransData(t[j], &item[j]);
       }
...
@@ -88,7 +88,7 @@ inline bool IsOpRole(const OpDesc &op, OpRole role) {
   const auto &attrs = op.GetAttrMap();
   auto iter = attrs.find(OpProtoAndCheckerMaker::OpRoleAttrName());
   if (iter == attrs.end()) return false;
-  return static_cast<bool>(BOOST_GET_CONST(int, iter->second) &
+  return static_cast<bool>(PADDLE_GET_CONST(int, iter->second) &
                            static_cast<int>(role));
 }
@@ -96,7 +96,7 @@ inline std::vector<std::string> GetOpRoleVarsOrEmpty(const OpDesc &op) {
   const auto &attrs = op.GetAttrMap();
   auto iter = attrs.find(OpProtoAndCheckerMaker::OpRoleVarAttrName());
   if (iter == attrs.end()) return {};
-  auto &ret = BOOST_GET_CONST(std::vector<std::string>, iter->second);
+  auto &ret = PADDLE_GET_CONST(std::vector<std::string>, iter->second);
   PADDLE_ENFORCE_EQ(
       ret.size() % 2,
       0,
@@ -104,7 +104,7 @@ inline std::vector<std::string> GetOpRoleVarsOrEmpty(const OpDesc &op) {
       "The size of attribute %s must be an even number, but got %d",
       OpProtoAndCheckerMaker::OpRoleVarAttrName(),
       ret.size()));
-  return BOOST_GET_CONST(std::vector<std::string>, iter->second);
+  return PADDLE_GET_CONST(std::vector<std::string>, iter->second);
 }
 bool IsDataParallelInferenceGraph(const ir::Graph &graph);
...
@@ -279,13 +279,13 @@ FetchResultType ParallelSSAGraphExecutor::Run(
         continue;
       }
       const auto &fetch_list =
-          BOOST_GET_CONST(FetchList, fetch_data[scope_idx]);
+          PADDLE_GET_CONST(FetchList, fetch_data[scope_idx]);
       if (data_is_lod_tensor(fetch_list[fetch_idx])) {
         lodtensor_ptrs.push_back(
-            &(BOOST_GET_CONST(LoDTensor, fetch_list[fetch_idx])));
+            &(PADDLE_GET_CONST(LoDTensor, fetch_list[fetch_idx])));
       } else {
         lodtensorarray_ptrs.push_back(
-            &(BOOST_GET_CONST(LoDTensorArray, fetch_list[fetch_idx])));
+            &(PADDLE_GET_CONST(LoDTensorArray, fetch_list[fetch_idx])));
       }
     }
     if (lodtensor_ptrs.size() != 0) {
@@ -318,7 +318,7 @@ FetchResultType ParallelSSAGraphExecutor::Run(
         continue;
       }
       const auto &fetch_list =
-          BOOST_GET_CONST(FetchUnmergedList, fetch_data[scope_idx]);
+          PADDLE_GET_CONST(FetchUnmergedList, fetch_data[scope_idx]);
       PADDLE_ENFORCE_EQ(
           fetch_list[fetch_idx].size(),
           1,
...
@@ -645,7 +645,7 @@ void Executor::RunPreparedContext(
   for (auto* op : global_block.AllOps()) {
     if (op->Type() == kFeedOpType) {
       std::string feed_target_name = op->Output("Out")[0];
-      int idx = BOOST_GET_CONST(int, op->GetAttr("col"));
+      int idx = PADDLE_GET_CONST(int, op->GetAttr("col"));
       SetFeedVariable(
           scope, *(*feed_targets)[feed_target_name], feed_holder_name, idx);
     }
@@ -657,7 +657,7 @@ void Executor::RunPreparedContext(
   for (auto* op : global_block.AllOps()) {
     if (op->Type() == kFetchOpType) {
       std::string fetch_target_name = op->Input("X")[0];
-      int idx = BOOST_GET_CONST(int, op->GetAttr("col"));
+      int idx = PADDLE_GET_CONST(int, op->GetAttr("col"));
       *(*fetch_targets)[fetch_target_name] =
           GetFetchVariable(*scope, fetch_holder_name, idx);
     }
...
@@ -40,7 +40,7 @@ void SetFeedVariable(Scope* scope,
     feed_inputs.resize(index + 1);
   }
   // shared data with input tensor
-  auto& val = BOOST_GET(LoDTensor, feed_inputs[index]);
+  auto& val = PADDLE_GET(LoDTensor, feed_inputs[index]);
   val.ShareDataWith(input);
   // set lod
   val.set_lod(input.lod());
...
@@ -173,7 +173,7 @@ class GradOpDescMakerBase {
   template <typename T>
   inline const T& Attr(const std::string& name) const {
-    return BOOST_GET_CONST(T, GetAttr(name));
+    return PADDLE_GET_CONST(T, GetAttr(name));
   }
   std::string ForwardOpType() const { return this->fwd_op_.Type(); }
...
@@ -129,17 +129,17 @@ class InferShapeArgumentMappingContext : public phi::ArgumentMappingContext {
 int64_t CompatMetaTensor::numel() const {
   if (is_runtime_) {
-    auto* var = BOOST_GET_CONST(Variable*, var_);
+    auto* var = PADDLE_GET_CONST(Variable*, var_);
     return var->Get<Tensor>().numel();
   } else {
-    auto* var = BOOST_GET_CONST(VarDesc*, var_);
+    auto* var = PADDLE_GET_CONST(VarDesc*, var_);
     return var->ElementSize();
   }
 }
 DDim CompatMetaTensor::dims() const {
   if (is_runtime_) {
-    auto* var = BOOST_GET_CONST(Variable*, var_);
+    auto* var = PADDLE_GET_CONST(Variable*, var_);
     if (var->IsType<phi::DenseTensor>()) {
       return var->Get<phi::DenseTensor>().dims();
     } else if (var->IsType<phi::SelectedRows>()) {
@@ -154,7 +154,7 @@ DDim CompatMetaTensor::dims() const {
           "DenseTensorArray."));
     }
   } else {
-    auto* var = BOOST_GET_CONST(VarDesc*, var_);
+    auto* var = PADDLE_GET_CONST(VarDesc*, var_);
     return var->GetShape().empty() ? phi::make_ddim({0UL})
                                    : phi::make_ddim(var->GetShape());
@@ -163,7 +163,7 @@ DDim CompatMetaTensor::dims() const {
 phi::DataType CompatMetaTensor::dtype() const {
   if (is_runtime_) {
-    auto* var = BOOST_GET_CONST(Variable*, var_);
+    auto* var = PADDLE_GET_CONST(Variable*, var_);
     if (var->IsType<phi::DenseTensor>()) {
       return var->Get<phi::DenseTensor>().dtype();
     } else if (var->IsType<phi::SelectedRows>()) {
@@ -177,14 +177,14 @@ phi::DataType CompatMetaTensor::dtype() const {
           "Currently, only can get dtype from DenseTensor or SelectedRows."));
     }
   } else {
-    auto* var = BOOST_GET_CONST(VarDesc*, var_);
+    auto* var = PADDLE_GET_CONST(VarDesc*, var_);
     return paddle::framework::TransToPhiDataType(var->GetDataType());
   }
 }
 DataLayout CompatMetaTensor::layout() const {
   if (is_runtime_) {
-    auto* var = BOOST_GET_CONST(Variable*, var_);
+    auto* var = PADDLE_GET_CONST(Variable*, var_);
     if (var->IsType<phi::DenseTensor>()) {
       return var->Get<phi::DenseTensor>().layout();
     } else if (var->IsType<phi::SelectedRows>()) {
@@ -207,7 +207,7 @@ DataLayout CompatMetaTensor::layout() const {
 void CompatMetaTensor::set_dims(const DDim& dims) {
   if (is_runtime_) {
-    auto* var = BOOST_GET(Variable*, var_);
+    auto* var = PADDLE_GET(Variable*, var_);
     if (var->IsType<phi::DenseTensor>()) {
       auto* tensor = var->GetMutable<phi::DenseTensor>();
       phi::DenseTensorUtils::GetMutableMeta(tensor)->dims = dims;
@@ -230,14 +230,14 @@ void CompatMetaTensor::set_dims(const DDim& dims) {
           "Currently, only can set dims from DenseTensor or SelectedRows."));
     }
   } else {
-    auto* var = BOOST_GET(VarDesc*, var_);
+    auto* var = PADDLE_GET(VarDesc*, var_);
     var->SetShape(vectorize(dims));
   }
 }
 void CompatMetaTensor::set_dtype(phi::DataType dtype) {
   if (is_runtime_) {
-    auto* var = BOOST_GET(Variable*, var_);
+    auto* var = PADDLE_GET(Variable*, var_);
     if (var->IsType<phi::DenseTensor>()) {
       auto* tensor = var->GetMutable<phi::DenseTensor>();
       phi::DenseTensorUtils::GetMutableMeta(tensor)->dtype = dtype;
@@ -252,14 +252,14 @@ void CompatMetaTensor::set_dtype(phi::DataType dtype) {
           "Currently, only can set dtype from DenseTensor or SelectedRows."));
     }
   } else {
-    auto* var = BOOST_GET(VarDesc*, var_);
+    auto* var = PADDLE_GET(VarDesc*, var_);
     var->SetDataType(paddle::framework::TransToProtoVarType(dtype));
   }
 }
 void CompatMetaTensor::set_layout(DataLayout layout) {
   if (is_runtime_) {
-    auto* var = BOOST_GET(Variable*, var_);
+    auto* var = PADDLE_GET(Variable*, var_);
     if (var->IsType<phi::DenseTensor>()) {
       auto* tensor = var->GetMutable<phi::DenseTensor>();
       phi::DenseTensorUtils::GetMutableMeta(tensor)->layout = layout;
@@ -282,7 +282,7 @@ void CompatMetaTensor::set_layout(DataLayout layout) {
 void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) {
   if (is_runtime_) {
-    auto* var = BOOST_GET(Variable*, var_);
+    auto* var = PADDLE_GET(Variable*, var_);
     if (var->IsType<phi::DenseTensor>()) {
       auto* tensor = var->GetMutable<phi::DenseTensor>();
       phi::DenseTensorUtils::GetMutableMeta(tensor)->lod =
@@ -292,7 +292,7 @@ void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) {
       // only LoDTensor need to share lod
     }
   } else {
-    auto* var = BOOST_GET(VarDesc*, var_);
+    auto* var = PADDLE_GET(VarDesc*, var_);
     var->SetLoDLevel(
         static_cast<const CompatMetaTensor&>(meta_tensor).GetCompileTimeLoD());
   }
@@ -301,7 +301,7 @@ void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) {
 void CompatMetaTensor::share_dims(const MetaTensor& meta_tensor) {
   set_dims(meta_tensor.dims());
   if (is_runtime_) {
-    auto* var = BOOST_GET(Variable*, var_);
+    auto* var = PADDLE_GET(Variable*, var_);
     if (var->IsType<phi::SelectedRows>()) {
       auto* selected_rows = var->GetMutable<phi::SelectedRows>();
       auto& input_selected_rows =
@@ -461,15 +461,15 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
       switch (AttrTypeID(attr)) {
         case framework::proto::AttrType::FLOAT:
           infer_meta_context.EmplaceBackAttr(
-              phi::Scalar(BOOST_GET_CONST(float, attr)));
+              phi::Scalar(PADDLE_GET_CONST(float, attr)));
           break;
         case framework::proto::AttrType::INT:
           infer_meta_context.EmplaceBackAttr(
-              phi::Scalar(BOOST_GET_CONST(int, attr)));
+              phi::Scalar(PADDLE_GET_CONST(int, attr)));
           break;
         case framework::proto::AttrType::STRING:
           infer_meta_context.EmplaceBackAttr(
-              phi::Scalar(BOOST_GET_CONST(std::string, attr)));
+              phi::Scalar(PADDLE_GET_CONST(std::string, attr)));
           break;
         default:
           PADDLE_THROW(platform::errors::Unimplemented(
@@ -481,7 +481,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
       auto infershape_input = std::move(ctx->GetInputVarPtrs(attr_name));
       if (infershape_input.size() == 1) {
         if (ctx->IsRuntime()) {
-          Variable* var = BOOST_GET_CONST(Variable*, infershape_input[0]);
+          Variable* var = PADDLE_GET_CONST(Variable*, infershape_input[0]);
           infer_meta_context.EmplaceBackAttr(
               std::move(experimental::MakePhiScalarFromVar(*var)));
         } else {
@@ -507,15 +507,15 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
       switch (AttrTypeID(attr)) {
         case framework::proto::AttrType::INTS:
           infer_meta_context.EmplaceBackAttr(std::move(
-              phi::IntArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
+              phi::IntArray(PADDLE_GET_CONST(std::vector<int32_t>, attr))));
           break;
         case framework::proto::AttrType::LONGS:
           infer_meta_context.EmplaceBackAttr(std::move(
-              phi::IntArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
+              phi::IntArray(PADDLE_GET_CONST(std::vector<int64_t>, attr))));
           break;
         case framework::proto::AttrType::INT:
           infer_meta_context.EmplaceBackAttr(
-              phi::IntArray({BOOST_GET_CONST(int, attr)}));
+              phi::IntArray({PADDLE_GET_CONST(int, attr)}));
           break;
         default:
           PADDLE_THROW(platform::errors::Unimplemented(
@@ -531,7 +531,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
         std::vector<Variable*> vars;
         vars.reserve(infershape_inputs.size());
         for (size_t i = 0; i < infershape_inputs.size(); i++) {
-          vars.push_back(BOOST_GET_CONST(Variable*, infershape_inputs[i]));
+          vars.push_back(PADDLE_GET_CONST(Variable*, infershape_inputs[i]));
         }
         if (infershape_inputs.size() != 1) {
           infer_meta_context.EmplaceBackAttr(
@@ -545,7 +545,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
         std::vector<VarDesc*> vars;
         vars.reserve(infershape_inputs.size());
         for (size_t i = 0; i < infershape_inputs.size(); ++i) {
-          vars.push_back(BOOST_GET_CONST(VarDesc*, infershape_inputs[i]));
+          vars.push_back(PADDLE_GET_CONST(VarDesc*, infershape_inputs[i]));
         }
         int64_t num_ele = 0;
@@ -576,7 +576,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
       auto& attr = *attr_ptr;
       switch (AttrTypeID(attr)) {
         case framework::proto::AttrType::INTS: {
-          const auto& vec = BOOST_GET_CONST(std::vector<int32_t>, attr);
+          const auto& vec = PADDLE_GET_CONST(std::vector<int32_t>, attr);
           std::vector<phi::Scalar> scalar_list;
           scalar_list.reserve(vec.size());
           for (const auto& val : vec) {
@@ -585,7 +585,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
           infer_meta_context.EmplaceBackAttr(std::move(scalar_list));
         } break;
         case framework::proto::AttrType::LONGS: {
-          const auto& vec = BOOST_GET_CONST(std::vector<int64_t>, attr);
+          const auto& vec = PADDLE_GET_CONST(std::vector<int64_t>, attr);
           std::vector<phi::Scalar> scalar_list;
           scalar_list.reserve(vec.size());
           for (const auto& val : vec) {
@@ -594,7 +594,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
           infer_meta_context.EmplaceBackAttr(std::move(scalar_list));
         } break;
         case framework::proto::AttrType::FLOATS: {
-          const auto& vec = BOOST_GET_CONST(std::vector<float>, attr);
+          const auto& vec = PADDLE_GET_CONST(std::vector<float>, attr);
           std::vector<phi::Scalar> scalar_list;
           scalar_list.reserve(vec.size());
           for (const auto& val : vec) {
@@ -603,7 +603,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
           infer_meta_context.EmplaceBackAttr(std::move(scalar_list));
         } break;
         case framework::proto::AttrType::FLOAT64S: {
-          const auto& vec = BOOST_GET_CONST(std::vector<double>, attr);
+          const auto& vec = PADDLE_GET_CONST(std::vector<double>, attr);
           std::vector<phi::Scalar> scalar_list;
           scalar_list.reserve(vec.size());
           for (const auto& val : vec) {
@@ -626,41 +626,41 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
       auto& attr = *attr_ptr;
       switch (attr_defs[i].type_index) {
         case phi::AttributeType::FLOAT32:
-          infer_meta_context.EmplaceBackAttr(BOOST_GET_CONST(float, attr));
+          infer_meta_context.EmplaceBackAttr(PADDLE_GET_CONST(float, attr));
           break;
         case phi::AttributeType::INT32:
-          infer_meta_context.EmplaceBackAttr(BOOST_GET_CONST(int, attr));
+          infer_meta_context.EmplaceBackAttr(PADDLE_GET_CONST(int, attr));
           break;
         case phi::AttributeType::BOOL:
-          infer_meta_context.EmplaceBackAttr(BOOST_GET_CONST(bool, attr));
+          infer_meta_context.EmplaceBackAttr(PADDLE_GET_CONST(bool, attr));
           break;
         case phi::AttributeType::INT64:
           infer_meta_context.EmplaceBackAttr(
-              BOOST_GET_CONST(int64_t, attr));
+              PADDLE_GET_CONST(int64_t, attr));
           break;
         case phi::AttributeType::INT32S:
           infer_meta_context.EmplaceBackAttr(
-              BOOST_GET_CONST(std::vector<int>, attr));
+              PADDLE_GET_CONST(std::vector<int>, attr));
           break;
         case phi::AttributeType::DATA_TYPE: {
           auto data_type = paddle::framework::TransToPhiDataType(
               static_cast<framework::proto::VarType::Type>(
-                  BOOST_GET_CONST(int, attr)));
+                  PADDLE_GET_CONST(int, attr)));
           infer_meta_context.EmplaceBackAttr(data_type);
         } break;
         case phi::AttributeType::STRING:
           infer_meta_context.EmplaceBackAttr(
-              BOOST_GET_CONST(std::string, attr));
+              PADDLE_GET_CONST(std::string, attr));
           break;
         case phi::AttributeType::INT64S:
           switch (AttrTypeID(attr)) {
             case framework::proto::AttrType::LONGS:
               infer_meta_context.EmplaceBackAttr(
-                  BOOST_GET_CONST(std::vector<int64_t>, attr));
+                  PADDLE_GET_CONST(std::vector<int64_t>, attr));
               break;
             case framework::proto::AttrType::INTS: {
               const auto& vector_int_attr =
-                  BOOST_GET_CONST(std::vector<int>, attr);
+                  PADDLE_GET_CONST(std::vector<int>, attr);
               const std::vector<int64_t> vector_int64_attr(
                   vector_int_attr.begin(), vector_int_attr.end());
               infer_meta_context.EmplaceBackAttr(vector_int64_attr);
@@ -675,19 +675,19 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
           break;
         case phi::AttributeType::FLOAT32S:
           infer_meta_context.EmplaceBackAttr(
-              BOOST_GET_CONST(std::vector<float>, attr));
+              PADDLE_GET_CONST(std::vector<float>, attr));
           break;
         case phi::AttributeType::STRINGS:
           infer_meta_context.EmplaceBackAttr(
-              BOOST_GET_CONST(std::vector<std::string>, attr));
+              PADDLE_GET_CONST(std::vector<std::string>, attr));
           break;
         case phi::AttributeType::BOOLS:
           infer_meta_context.EmplaceBackAttr(
-              BOOST_GET_CONST(std::vector<bool>, attr));
+              PADDLE_GET_CONST(std::vector<bool>, attr));
           break;
         case phi::AttributeType::FLOAT64S:
           infer_meta_context.EmplaceBackAttr(
-              BOOST_GET_CONST(std::vector<double>, attr));
+              PADDLE_GET_CONST(std::vector<double>, attr));
           break;
         default:
           PADDLE_THROW(platform::errors::Unimplemented(
@@ -714,12 +714,12 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
         outputs;
     for (const auto& out : output_var) {
       if (ctx->IsRuntime()) {
-        if (BOOST_GET_CONST(Variable*, out)) {
+        if (PADDLE_GET_CONST(Variable*, out)) {
          outputs.emplace_back(
              std::move(CompatMetaTensor(out, ctx->IsRuntime())));
          continue;
        }
-      } else if (BOOST_GET_CONST(VarDesc*, out)) {
+      } else if (PADDLE_GET_CONST(VarDesc*, out)) {
        outputs.emplace_back(
            std::move(CompatMetaTensor(out, ctx->IsRuntime())));
        continue;
...
@@ -67,12 +67,12 @@ class CompatMetaTensor : public phi::MetaTensor {
  private:
   const LoD& GetRuntimeLoD() const {
-    auto* var = BOOST_GET_CONST(Variable*, var_);
+    auto* var = PADDLE_GET_CONST(Variable*, var_);
     return var->Get<LoDTensor>().lod();
   }
   int32_t GetCompileTimeLoD() const {
-    auto* var = BOOST_GET_CONST(VarDesc*, var_);
+    auto* var = PADDLE_GET_CONST(VarDesc*, var_);
     return var->GetLoDLevel();
   }
@@ -81,7 +81,7 @@ class CompatMetaTensor : public phi::MetaTensor {
                       true,
                       platform::errors::Unavailable(
                           "Only can get Tensor from MetaTensor in rumtime."));
-    auto* var = BOOST_GET_CONST(Variable*, var_);
+    auto* var = PADDLE_GET_CONST(Variable*, var_);
     PADDLE_ENFORCE_EQ(var->IsType<phi::SelectedRows>(),
                       true,
                       platform::errors::Unavailable(
...
@@ -77,17 +77,17 @@ void AdaptivePool2dConvertGlobalPass::ApplyImpl(ir::Graph* graph) const {
         op->HasAttr("ksize")) {
       if (op->HasAttr("global_pooling")) {
         bool global_pooling =
-            BOOST_GET_CONST(bool, op->GetAttr("global_pooling"));
+            PADDLE_GET_CONST(bool, op->GetAttr("global_pooling"));
         if (global_pooling) continue;
       }
       if (!op->HasAttr("pooling_type")) continue;
       std::string type =
-          BOOST_GET_CONST(std::string, op->GetAttr("pooling_type"));
+          PADDLE_GET_CONST(std::string, op->GetAttr("pooling_type"));
       // adaptive has no effect on max pooling
       if (type == "max") continue;
-      bool adaptive = BOOST_GET_CONST(bool, op->GetAttr("adaptive"));
+      bool adaptive = PADDLE_GET_CONST(bool, op->GetAttr("adaptive"));
       std::vector<int> ksize =
-          BOOST_GET_CONST(std::vector<int>, op->GetAttr("ksize"));
+          PADDLE_GET_CONST(std::vector<int>, op->GetAttr("ksize"));
       if (adaptive && ksize.size() == 2 && ksize[0] == 1 && ksize[1] == 1) {
         op->SetAttr("adaptive", false);
         op->SetAttr("global_pooling", true);
...
@@ -46,7 +46,7 @@ TEST(AdaptivePool2dConvertGlobalPass, basic) {
     if (node->IsOp() && node->Op()->Type() == "pool2d") {
       if (node->Op()->HasAttr("global_pooling")) {
         global_pooling =
-            BOOST_GET_CONST(bool, node->Op()->GetAttr("global_pooling"));
+            PADDLE_GET_CONST(bool, node->Op()->GetAttr("global_pooling"));
       }
     }
   }
...
@@ -308,7 +308,7 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
     // update weights and biases
     float epsilon =
-        BOOST_GET_CONST(float, batch_norm->Op()->GetAttr("epsilon"));
+        PADDLE_GET_CONST(float, batch_norm->Op()->GetAttr("epsilon"));
     recompute_bias_and_weights(scope,
                                conv_weight,
                                *bn_scale,
@@ -552,7 +552,7 @@ void ConvEltwiseAddBNFusePass::ApplyImpl(ir::Graph* graph) const {
     // update weights and biases
     float epsilon =
-        BOOST_GET_CONST(float, batch_norm->Op()->GetAttr("epsilon"));
+        PADDLE_GET_CONST(float, batch_norm->Op()->GetAttr("epsilon"));
     // if bias is an input to other ops as well then we cannot overwrite it
     // so we create separate elementwise Y in nodes
...
@@ -92,7 +92,7 @@ class PlacementPassTest {
       if (node->IsOp() && node->Op()) {
         auto* op = node->Op();
         if (op->HasAttr("use_cudnn") &&
-            BOOST_GET_CONST(bool, op->GetAttr("use_cudnn"))) {
+            PADDLE_GET_CONST(bool, op->GetAttr("use_cudnn"))) {
           ++use_cudnn_true_count;
         }
       }
...
...@@ -63,9 +63,9 @@ void DeleteFillConstantOpPass::ApplyImpl(ir::Graph* graph) const { ...@@ -63,9 +63,9 @@ void DeleteFillConstantOpPass::ApplyImpl(ir::Graph* graph) const {
Node* fill_constant_out_node = subgraph.at(fill_constant_out); Node* fill_constant_out_node = subgraph.at(fill_constant_out);
// Get fill_constant's attr // Get fill_constant's attr
auto fill_constant = fill_constant_op_node->Op(); auto fill_constant = fill_constant_op_node->Op();
auto value = BOOST_GET_CONST(float, fill_constant->GetAttr("value")); auto value = PADDLE_GET_CONST(float, fill_constant->GetAttr("value"));
auto shape = auto shape =
BOOST_GET_CONST(std::vector<int64_t>, fill_constant->GetAttr("shape")); PADDLE_GET_CONST(std::vector<int64_t>, fill_constant->GetAttr("shape"));
auto* scope = param_scope(); auto* scope = param_scope();
auto fill_constant_out_desc = fill_constant_out_node->Var(); auto fill_constant_out_desc = fill_constant_out_node->Var();
fill_constant_out_desc->SetShape(shape); fill_constant_out_desc->SetShape(shape);
......
...@@ -96,7 +96,7 @@ void DeleteQuantDequantFilterOpPass::ApplyImpl(ir::Graph* graph) const { ...@@ -96,7 +96,7 @@ void DeleteQuantDequantFilterOpPass::ApplyImpl(ir::Graph* graph) const {
} }
std::unordered_set<const Node*> nodes2rm = {}; std::unordered_set<const Node*> nodes2rm = {};
int bit_length = int bit_length =
BOOST_GET_CONST(int, quant_dequant_op->Op()->GetAttr("bit_length")); PADDLE_GET_CONST(int, quant_dequant_op->Op()->GetAttr("bit_length"));
int range = ((1 << (bit_length - 1)) - 1); int range = ((1 << (bit_length - 1)) - 1);
std::vector<float> weight_scale; std::vector<float> weight_scale;
std::string quant_dequant_op_out_name = quant_dequant_op_out->Var()->Name(); std::string quant_dequant_op_out_name = quant_dequant_op_out->Var()->Name();
...@@ -133,7 +133,7 @@ void DeleteQuantDequantFilterOpPass::ApplyImpl(ir::Graph* graph) const { ...@@ -133,7 +133,7 @@ void DeleteQuantDequantFilterOpPass::ApplyImpl(ir::Graph* graph) const {
// Get weight scale // Get weight scale
if (dequant_type == "fake_channel_wise_quantize_dequantize_abs_max") { if (dequant_type == "fake_channel_wise_quantize_dequantize_abs_max") {
int quant_axis = int quant_axis =
BOOST_GET_CONST(int, quant_dequant_op->Op()->GetAttr("quant_axis")); PADDLE_GET_CONST(int, quant_dequant_op->Op()->GetAttr("quant_axis"));
PADDLE_ENFORCE_EQ(quant_axis == 0 || quant_axis == 1, PADDLE_ENFORCE_EQ(quant_axis == 0 || quant_axis == 1,
true, true,
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
......
...@@ -113,7 +113,7 @@ void DeleteQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const { ...@@ -113,7 +113,7 @@ void DeleteQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const {
*/ */
std::unordered_set<const Node*> nodes2rm = {}; std::unordered_set<const Node*> nodes2rm = {};
int bit_length = int bit_length =
BOOST_GET_CONST(int, quantize_linear_op->Op()->GetAttr("bit_length")); PADDLE_GET_CONST(int, quantize_linear_op->Op()->GetAttr("bit_length"));
int range = ((1 << (bit_length - 1)) - 1); int range = ((1 << (bit_length - 1)) - 1);
// Get input scale from tensor // Get input scale from tensor
......
...@@ -61,7 +61,7 @@ void DeleteQuantDequantOpPass::ApplyImpl(ir::Graph* graph) const { ...@@ -61,7 +61,7 @@ void DeleteQuantDequantOpPass::ApplyImpl(ir::Graph* graph) const {
Node* input = subgraph.at(input_node); Node* input = subgraph.at(input_node);
GET_NODES; GET_NODES;
int bit_length = int bit_length =
BOOST_GET_CONST(int, quant_dequant_op->Op()->GetAttr("bit_length")); PADDLE_GET_CONST(int, quant_dequant_op->Op()->GetAttr("bit_length"));
// Get input scale from tensor // Get input scale from tensor
std::string input_scale_var_name = std::string input_scale_var_name =
......
...@@ -297,7 +297,7 @@ void DeleteWeightQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const { ...@@ -297,7 +297,7 @@ void DeleteWeightQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const {
} }
*/ */
std::unordered_set<const Node*> nodes2rm = {}; std::unordered_set<const Node*> nodes2rm = {};
int bit_length = BOOST_GET_CONST( int bit_length = PADDLE_GET_CONST(
int, weight_dequantize_linear_op->Op()->GetAttr("bit_length")); int, weight_dequantize_linear_op->Op()->GetAttr("bit_length"));
int range = ((1 << (bit_length - 1)) - 1); int range = ((1 << (bit_length - 1)) - 1);
...@@ -327,7 +327,7 @@ void DeleteWeightQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const { ...@@ -327,7 +327,7 @@ void DeleteWeightQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const {
std::vector<float> weight_data_tmp; std::vector<float> weight_data_tmp;
weight_data_tmp.reserve(weight_tensor->numel()); weight_data_tmp.reserve(weight_tensor->numel());
int quant_axis = BOOST_GET_CONST( int quant_axis = PADDLE_GET_CONST(
int, weight_dequantize_linear_op->Op()->GetAttr("quant_axis")); int, weight_dequantize_linear_op->Op()->GetAttr("quant_axis"));
if (quant_axis == -1) { // per_layer quant_dequant: all OP if (quant_axis == -1) { // per_layer quant_dequant: all OP
PADDLE_ENFORCE_EQ(weight_scale_nums, PADDLE_ENFORCE_EQ(weight_scale_nums,
......
...@@ -230,9 +230,9 @@ static int BuildFusion(Graph* graph, ...@@ -230,9 +230,9 @@ static int BuildFusion(Graph* graph,
// TODO(jczaja): Add support for is_sparse / is_distributed // TODO(jczaja): Add support for is_sparse / is_distributed
auto is_sparse = auto is_sparse =
BOOST_GET_CONST(bool, lookup_table->Op()->GetAttr("is_sparse")); PADDLE_GET_CONST(bool, lookup_table->Op()->GetAttr("is_sparse"));
auto is_distributed = auto is_distributed =
BOOST_GET_CONST(bool, lookup_table->Op()->GetAttr("is_distributed")); PADDLE_GET_CONST(bool, lookup_table->Op()->GetAttr("is_distributed"));
if (is_sparse == true || is_distributed == true) { if (is_sparse == true || is_distributed == true) {
return; return;
......
...@@ -253,7 +253,7 @@ void FCElementwiseLayerNormFusePass::ApplyImpl(ir::Graph *graph) const { ...@@ -253,7 +253,7 @@ void FCElementwiseLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
} }
int begin_norm_axis = int begin_norm_axis =
BOOST_GET_CONST(int, layer_norm->Op()->GetAttr("begin_norm_axis")); PADDLE_GET_CONST(int, layer_norm->Op()->GetAttr("begin_norm_axis"));
auto layer_norm_x_dims = fc_out->Var()->GetShape(); auto layer_norm_x_dims = fc_out->Var()->GetShape();
auto layer_norm_x_mat_dims = auto layer_norm_x_mat_dims =
phi::flatten_to_2d(phi::make_ddim(layer_norm_x_dims), begin_norm_axis); phi::flatten_to_2d(phi::make_ddim(layer_norm_x_dims), begin_norm_axis);
......
...@@ -139,8 +139,8 @@ int FCFusePass::ApplyFCPattern(Graph* graph, bool with_relu) const { ...@@ -139,8 +139,8 @@ int FCFusePass::ApplyFCPattern(Graph* graph, bool with_relu) const {
// axis of elementwise_add should be -1 or x_num_col_dims // axis of elementwise_add should be -1 or x_num_col_dims
auto x_num_col_dims = auto x_num_col_dims =
BOOST_GET_CONST(int, mul->Op()->GetAttr("x_num_col_dims")); PADDLE_GET_CONST(int, mul->Op()->GetAttr("x_num_col_dims"));
auto axis = BOOST_GET_CONST(int, elementwise_add->Op()->GetAttr("axis")); auto axis = PADDLE_GET_CONST(int, elementwise_add->Op()->GetAttr("axis"));
if (axis != -1 && axis != x_num_col_dims) return; if (axis != -1 && axis != x_num_col_dims) return;
// Shape of bias should be [1, out_size] or [out_size] // Shape of bias should be [1, out_size] or [out_size]
...@@ -263,7 +263,7 @@ int FCFusePass::ApplyFCPattern(Graph* graph, bool with_relu) const { ...@@ -263,7 +263,7 @@ int FCFusePass::ApplyFCPattern(Graph* graph, bool with_relu) const {
elementwise_add_op_desc->GetNullableAttr("out_threshold"); elementwise_add_op_desc->GetNullableAttr("out_threshold");
if (out_threshold_attr.index()) { if (out_threshold_attr.index()) {
VLOG(4) << "setting out_threshold: " VLOG(4) << "setting out_threshold: "
<< BOOST_GET_CONST(float, out_threshold_attr); << PADDLE_GET_CONST(float, out_threshold_attr);
desc.SetAttr("out_threshold", out_threshold_attr); desc.SetAttr("out_threshold", out_threshold_attr);
} }
desc.Flush(); desc.Flush();
......
...@@ -255,7 +255,7 @@ void FuseElewiseAddActPass::RemoveIntermediateOut(Graph *graph) const { ...@@ -255,7 +255,7 @@ void FuseElewiseAddActPass::RemoveIntermediateOut(Graph *graph) const {
for (auto &cur_node : graph->Nodes()) { for (auto &cur_node : graph->Nodes()) {
if (cur_node->IsVar()) continue; if (cur_node->IsVar()) continue;
if (cur_node->Name() == "fused_elemwise_add_activation") { if (cur_node->Name() == "fused_elemwise_add_activation") {
bool save_intermediate_out = BOOST_GET_CONST( bool save_intermediate_out = PADDLE_GET_CONST(
bool, cur_node->Op()->GetAttr("save_intermediate_out")); bool, cur_node->Op()->GetAttr("save_intermediate_out"));
auto intermediate_out_args = cur_node->Op()->Output("IntermediateOut"); auto intermediate_out_args = cur_node->Op()->Output("IntermediateOut");
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
......
...@@ -27,8 +27,8 @@ namespace ir { ...@@ -27,8 +27,8 @@ namespace ir {
static void GetTransposeAttrsFromOp(const OpDesc &op, static void GetTransposeAttrsFromOp(const OpDesc &op,
bool *trans_x, bool *trans_x,
bool *trans_y) { bool *trans_y) {
*trans_x = BOOST_GET_CONST(bool, op.GetAttr("trans_x")); *trans_x = PADDLE_GET_CONST(bool, op.GetAttr("trans_x"));
*trans_y = BOOST_GET_CONST(bool, op.GetAttr("trans_y")); *trans_y = PADDLE_GET_CONST(bool, op.GetAttr("trans_y"));
} }
void FuseGemmEpiloguePass::ApplyImpl(ir::Graph *graph) const { void FuseGemmEpiloguePass::ApplyImpl(ir::Graph *graph) const {
...@@ -492,7 +492,7 @@ bool FuseGemmEpiloguePass::IsGemmFromLinear_( ...@@ -492,7 +492,7 @@ bool FuseGemmEpiloguePass::IsGemmFromLinear_(
"fused_transpose_Y"}) { "fused_transpose_Y"}) {
if (matmul_v2_op->HasAttr(attr_name)) { if (matmul_v2_op->HasAttr(attr_name)) {
std::vector<int> tmp_vec = std::vector<int> tmp_vec =
BOOST_GET_CONST(std::vector<int>, matmul_v2_op->GetAttr(attr_name)); PADDLE_GET_CONST(std::vector<int>, matmul_v2_op->GetAttr(attr_name));
if (tmp_vec.size() > 0) return false; if (tmp_vec.size() > 0) return false;
} }
} }
......
...@@ -111,69 +111,69 @@ class FuseAdamOpPass : public FuseOptimizerOpPass { ...@@ -111,69 +111,69 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
// Check attributions // Check attributions
// NOTE: If new attribution is added, the following code maybe need change. // NOTE: If new attribution is added, the following code maybe need change.
int op_role = BOOST_GET_CONST( int op_role = PADDLE_GET_CONST(
int, int,
adam_ops[0]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); adam_ops[0]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName()));
float beta1 = BOOST_GET_CONST(float, adam_ops[0]->Op()->GetAttr("beta1")); float beta1 = PADDLE_GET_CONST(float, adam_ops[0]->Op()->GetAttr("beta1"));
float beta2 = BOOST_GET_CONST(float, adam_ops[0]->Op()->GetAttr("beta2")); float beta2 = PADDLE_GET_CONST(float, adam_ops[0]->Op()->GetAttr("beta2"));
float epsilon = float epsilon =
BOOST_GET_CONST(float, adam_ops[0]->Op()->GetAttr("epsilon")); PADDLE_GET_CONST(float, adam_ops[0]->Op()->GetAttr("epsilon"));
bool lazy_mode = bool lazy_mode =
BOOST_GET_CONST(bool, adam_ops[0]->Op()->GetAttr("lazy_mode")); PADDLE_GET_CONST(bool, adam_ops[0]->Op()->GetAttr("lazy_mode"));
int64_t min_row_size_to_use_multithread = BOOST_GET_CONST( int64_t min_row_size_to_use_multithread = PADDLE_GET_CONST(
int64_t, adam_ops[0]->Op()->GetAttr("min_row_size_to_use_multithread")); int64_t, adam_ops[0]->Op()->GetAttr("min_row_size_to_use_multithread"));
for (auto &adam_op : adam_ops) { for (auto &adam_op : adam_ops) {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
beta1, beta1,
BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta1")), PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("beta1")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"All adam Op's attr(beta1) must be same, but there are two " "All adam Op's attr(beta1) must be same, but there are two "
"different " "different "
"value: %f, %f.", "value: %f, %f.",
beta1, beta1,
BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta1")))); PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("beta1"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
beta2, beta2,
BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta2")), PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("beta2")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"All adam Op's attr(beta2) must be same, but there are two " "All adam Op's attr(beta2) must be same, but there are two "
"different " "different "
"value: %f, %f.", "value: %f, %f.",
beta2, beta2,
BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta2")))); PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("beta2"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
epsilon, epsilon,
BOOST_GET_CONST(float, adam_op->Op()->GetAttr("epsilon")), PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("epsilon")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"All adam Op's attr(epsilon) must be same, but there are two " "All adam Op's attr(epsilon) must be same, but there are two "
"different " "different "
"value: %f, %f.", "value: %f, %f.",
epsilon, epsilon,
BOOST_GET_CONST(float, adam_op->Op()->GetAttr("epsilon")))); PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("epsilon"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
lazy_mode, lazy_mode,
BOOST_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode")), PADDLE_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"All adam Op's attr(lazy_mode) must be same, but there are two " "All adam Op's attr(lazy_mode) must be same, but there are two "
"different " "different "
"value: %d, %d.", "value: %d, %d.",
lazy_mode, lazy_mode,
BOOST_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode")))); PADDLE_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
min_row_size_to_use_multithread, min_row_size_to_use_multithread,
BOOST_GET_CONST( PADDLE_GET_CONST(
int64_t, int64_t,
adam_op->Op()->GetAttr("min_row_size_to_use_multithread")), adam_op->Op()->GetAttr("min_row_size_to_use_multithread")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"All adam Op's attr(min_row_size_to_use_multithread) must be " "All adam Op's attr(min_row_size_to_use_multithread) must be "
"same, but there are two different value: %I64, %I64.", "same, but there are two different value: %I64, %I64.",
min_row_size_to_use_multithread, min_row_size_to_use_multithread,
BOOST_GET_CONST( PADDLE_GET_CONST(
int64_t, int64_t,
adam_op->Op()->GetAttr("min_row_size_to_use_multithread")))); adam_op->Op()->GetAttr("min_row_size_to_use_multithread"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
op_role, op_role,
BOOST_GET_CONST( PADDLE_GET_CONST(
int, int,
adam_op->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())), adam_op->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
...@@ -181,9 +181,9 @@ class FuseAdamOpPass : public FuseOptimizerOpPass { ...@@ -181,9 +181,9 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
"different " "different "
"value: %d, %d.", "value: %d, %d.",
op_role, op_role,
BOOST_GET_CONST(int, PADDLE_GET_CONST(int,
adam_op->Op()->GetAttr( adam_op->Op()->GetAttr(
OpProtoAndCheckerMaker::OpRoleAttrName())))); OpProtoAndCheckerMaker::OpRoleAttrName()))));
} }
// NOTE: fused_var is only exist in scope, so the graph doesn't have // NOTE: fused_var is only exist in scope, so the graph doesn't have
...@@ -270,54 +270,54 @@ class FuseAdamOpPass : public FuseOptimizerOpPass { ...@@ -270,54 +270,54 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
VLOG(6) << "The number of scale op is " << scale_ops.size() << "."; VLOG(6) << "The number of scale op is " << scale_ops.size() << ".";
// Check attributions // Check attributions
// NOTE: If new attribution is added, the following code maybe need change. // NOTE: If new attribution is added, the following code maybe need change.
int op_role = BOOST_GET_CONST( int op_role = PADDLE_GET_CONST(
int, int,
scale_ops[0]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); scale_ops[0]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName()));
float scale = BOOST_GET_CONST(float, scale_ops[0]->Op()->GetAttr("scale")); float scale = PADDLE_GET_CONST(float, scale_ops[0]->Op()->GetAttr("scale"));
float bias = BOOST_GET_CONST(float, scale_ops[0]->Op()->GetAttr("bias")); float bias = PADDLE_GET_CONST(float, scale_ops[0]->Op()->GetAttr("bias"));
bool bias_after_scale = bool bias_after_scale =
BOOST_GET_CONST(bool, scale_ops[0]->Op()->GetAttr("bias_after_scale")); PADDLE_GET_CONST(bool, scale_ops[0]->Op()->GetAttr("bias_after_scale"));
for (auto &scale_op : scale_ops) { for (auto &scale_op : scale_ops) {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
scale, scale,
BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")), PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"All scale Op's attr(scale) must be same, but there are two " "All scale Op's attr(scale) must be same, but there are two "
"different " "different "
"value: %f, %f.", "value: %f, %f.",
scale, scale,
BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")))); PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
bias, bias,
BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")), PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"All scale Op's attr(bias) must be same, but there are two " "All scale Op's attr(bias) must be same, but there are two "
"different " "different "
"value: %f, %f.", "value: %f, %f.",
bias, bias,
BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")))); PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
bias_after_scale, bias_after_scale,
BOOST_GET_CONST(bool, scale_op->Op()->GetAttr("bias_after_scale")), PADDLE_GET_CONST(bool, scale_op->Op()->GetAttr("bias_after_scale")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"All scale Op's attr(bias_after_scale) must be same, but there " "All scale Op's attr(bias_after_scale) must be same, but there "
"are two different value: %d, %d.", "are two different value: %d, %d.",
bias_after_scale, bias_after_scale,
BOOST_GET_CONST(bool, PADDLE_GET_CONST(bool,
scale_op->Op()->GetAttr("bias_after_scale")))); scale_op->Op()->GetAttr("bias_after_scale"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
op_role, op_role,
BOOST_GET_CONST(int, PADDLE_GET_CONST(int,
scale_op->Op()->GetAttr( scale_op->Op()->GetAttr(
OpProtoAndCheckerMaker::OpRoleAttrName())), OpProtoAndCheckerMaker::OpRoleAttrName())),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"All scale Op's attr(op_role) must be same, but there are two " "All scale Op's attr(op_role) must be same, but there are two "
"different " "different "
"value: %d, %d.", "value: %d, %d.",
op_role, op_role,
BOOST_GET_CONST(int, PADDLE_GET_CONST(int,
scale_op->Op()->GetAttr( scale_op->Op()->GetAttr(
OpProtoAndCheckerMaker::OpRoleAttrName())))); OpProtoAndCheckerMaker::OpRoleAttrName()))));
} }
// NOTE: fused_var is only exist in scope, so the graph doesn't have // NOTE: fused_var is only exist in scope, so the graph doesn't have
......
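Editor's note: the adam and scale hunks above repeat one check — read each attribute once from the first op in the group, then PADDLE_ENFORCE_EQ that every other op agrees before fusing. A minimal standalone sketch of that pattern, assuming a toy OpLike type with a variant-backed attribute map (all names below are illustrative):

#include <cassert>
#include <map>
#include <stdexcept>
#include <string>
#include <variant>
#include <vector>

using Attr = std::variant<bool, int, float>;

struct OpLike {
  std::map<std::string, Attr> attrs;
  template <typename T>
  T GetAttr(const std::string& name) const { return std::get<T>(attrs.at(name)); }
};

// Returns the shared value of `name`, throwing if any op in the group disagrees.
template <typename T>
T RequireSameAttr(const std::vector<OpLike>& ops, const std::string& name) {
  T first = ops.front().GetAttr<T>(name);
  for (const auto& op : ops) {
    if (op.GetAttr<T>(name) != first) {
      throw std::runtime_error("attr '" + name + "' differs between fused ops");
    }
  }
  return first;
}

int main() {
  std::vector<OpLike> scale_ops = {
      {{{"scale", 1.0f}, {"bias_after_scale", true}}},
      {{{"scale", 1.0f}, {"bias_after_scale", true}}},
  };
  float scale = RequireSameAttr<float>(scale_ops, "scale");
  bool bias_after_scale = RequireSameAttr<bool>(scale_ops, "bias_after_scale");
  assert(scale == 1.0f && bias_after_scale);
  return 0;
}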
...@@ -49,45 +49,45 @@ class FuseMomentumOpPass : public FuseOptimizerOpPass { ...@@ -49,45 +49,45 @@ class FuseMomentumOpPass : public FuseOptimizerOpPass {
// Check attributions // Check attributions
// NOTE: If new attribution is added, the following code maybe need change. // NOTE: If new attribution is added, the following code maybe need change.
int op_role = int op_role =
BOOST_GET_CONST(int, PADDLE_GET_CONST(int,
momentum_ops[0]->Op()->GetAttr( momentum_ops[0]->Op()->GetAttr(
OpProtoAndCheckerMaker::OpRoleAttrName())); OpProtoAndCheckerMaker::OpRoleAttrName()));
float mu = BOOST_GET_CONST(float, momentum_ops[0]->Op()->GetAttr("mu")); float mu = PADDLE_GET_CONST(float, momentum_ops[0]->Op()->GetAttr("mu"));
bool use_nesterov = bool use_nesterov =
BOOST_GET_CONST(bool, momentum_ops[0]->Op()->GetAttr("use_nesterov")); PADDLE_GET_CONST(bool, momentum_ops[0]->Op()->GetAttr("use_nesterov"));
for (auto &momentum_op : momentum_ops) { for (auto &momentum_op : momentum_ops) {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
mu, mu,
BOOST_GET_CONST(float, momentum_op->Op()->GetAttr("mu")), PADDLE_GET_CONST(float, momentum_op->Op()->GetAttr("mu")),
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
"All momentum Op's attr(mu) must be same, but there are two " "All momentum Op's attr(mu) must be same, but there are two "
"different " "different "
"value: %f, %f.", "value: %f, %f.",
mu, mu,
BOOST_GET_CONST(float, momentum_op->Op()->GetAttr("mu")))); PADDLE_GET_CONST(float, momentum_op->Op()->GetAttr("mu"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
use_nesterov, use_nesterov,
BOOST_GET_CONST(bool, momentum_op->Op()->GetAttr("use_nesterov")), PADDLE_GET_CONST(bool, momentum_op->Op()->GetAttr("use_nesterov")),
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
"All momentum Op's attr(use_nesterov) must be same, but there " "All momentum Op's attr(use_nesterov) must be same, but there "
"are two different value: %d, %d.", "are two different value: %d, %d.",
use_nesterov, use_nesterov,
BOOST_GET_CONST(bool, PADDLE_GET_CONST(bool,
momentum_op->Op()->GetAttr("use_nesterov")))); momentum_op->Op()->GetAttr("use_nesterov"))));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
op_role, op_role,
BOOST_GET_CONST(int, PADDLE_GET_CONST(int,
momentum_op->Op()->GetAttr( momentum_op->Op()->GetAttr(
OpProtoAndCheckerMaker::OpRoleAttrName())), OpProtoAndCheckerMaker::OpRoleAttrName())),
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
"All momentum Op's attr(op_role) must be same, but there are two " "All momentum Op's attr(op_role) must be same, but there are two "
"different " "different "
"value: %d, %d.", "value: %d, %d.",
op_role, op_role,
BOOST_GET_CONST(int, PADDLE_GET_CONST(int,
momentum_op->Op()->GetAttr( momentum_op->Op()->GetAttr(
OpProtoAndCheckerMaker::OpRoleAttrName())))); OpProtoAndCheckerMaker::OpRoleAttrName()))));
} }
// NOTE: fused_var is only exist in scope, so the graph doesn't have // NOTE: fused_var is only exist in scope, so the graph doesn't have
......
...@@ -48,7 +48,7 @@ class FuseSgdOpPass : public FuseOptimizerOpPass { ...@@ -48,7 +48,7 @@ class FuseSgdOpPass : public FuseOptimizerOpPass {
// NOTE: fused_var is only exist in scope, so the graph doesn't have // NOTE: fused_var is only exist in scope, so the graph doesn't have
// fused_var node. // fused_var node.
int op_role = BOOST_GET_CONST( int op_role = PADDLE_GET_CONST(
int, int,
sgd_ops[0]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); sgd_ops[0]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName()));
VLOG(6) << "Insert sgd to graph."; VLOG(6) << "Insert sgd to graph.";
......
...@@ -63,9 +63,9 @@ FuseOptions FusePassBase::FindFuseOption(const Node& node1, ...@@ -63,9 +63,9 @@ FuseOptions FusePassBase::FindFuseOption(const Node& node1,
const Node& node2) const { const Node& node2) const {
#ifdef PADDLE_WITH_MKLDNN #ifdef PADDLE_WITH_MKLDNN
bool node1_mkldnn = node1.Op()->HasAttr("use_mkldnn") && bool node1_mkldnn = node1.Op()->HasAttr("use_mkldnn") &&
BOOST_GET_CONST(bool, node1.Op()->GetAttr("use_mkldnn")); PADDLE_GET_CONST(bool, node1.Op()->GetAttr("use_mkldnn"));
bool node2_mkldnn = node2.Op()->HasAttr("use_mkldnn") && bool node2_mkldnn = node2.Op()->HasAttr("use_mkldnn") &&
BOOST_GET_CONST(bool, node2.Op()->GetAttr("use_mkldnn")); PADDLE_GET_CONST(bool, node2.Op()->GetAttr("use_mkldnn"));
if (node1_mkldnn && node2_mkldnn) if (node1_mkldnn && node2_mkldnn)
return FUSE_MKLDNN; return FUSE_MKLDNN;
else if (!node1_mkldnn && !node2_mkldnn) else if (!node1_mkldnn && !node2_mkldnn)
......
...@@ -80,26 +80,26 @@ static std::string RefineTemplateWithAttr(const std::string& op_type, ...@@ -80,26 +80,26 @@ static std::string RefineTemplateWithAttr(const std::string& op_type,
proto::AttrType attr_type = proto::AttrType attr_type =
static_cast<proto::AttrType>(it->second.index() - 1); static_cast<proto::AttrType>(it->second.index() - 1);
if (attr_type == proto::AttrType::BOOLEAN) { if (attr_type == proto::AttrType::BOOLEAN) {
bool result = BOOST_GET(bool, attr); bool result = PADDLE_GET(bool, attr);
if (result) { if (result) {
ret = "true"; ret = "true";
} else { } else {
ret = "false"; ret = "false";
} }
} else if (attr_type == proto::AttrType::INT) { } else if (attr_type == proto::AttrType::INT) {
int result = BOOST_GET(int, attr); int result = PADDLE_GET(int, attr);
str_cvt << result; str_cvt << result;
ret = str_cvt.str(); ret = str_cvt.str();
} else if (attr_type == proto::AttrType::LONG) { } else if (attr_type == proto::AttrType::LONG) {
int64_t result = BOOST_GET(int64_t, attr); int64_t result = PADDLE_GET(int64_t, attr);
str_cvt << result; str_cvt << result;
ret = str_cvt.str(); ret = str_cvt.str();
} else if (attr_type == proto::AttrType::FLOAT) { } else if (attr_type == proto::AttrType::FLOAT) {
float result = BOOST_GET(float, attr); float result = PADDLE_GET(float, attr);
str_cvt << result; str_cvt << result;
ret = str_cvt.str(); ret = str_cvt.str();
} else if (attr_type == proto::AttrType::STRING) { } else if (attr_type == proto::AttrType::STRING) {
std::string result = BOOST_GET(std::string, attr); std::string result = PADDLE_GET(std::string, attr);
ret = result; ret = result;
} }
} else { } else {
......
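Editor's note: RefineTemplateWithAttr above stringifies an attribute by switching on its proto::AttrType tag. An equivalent standalone idiom over a plain std::variant is std::visit; this sketch is illustrative only and does not use Paddle's Attribute type:

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <type_traits>
#include <variant>

using Attr = std::variant<bool, int, int64_t, float, std::string>;

std::string AttrToString(const Attr& attr) {
  return std::visit(
      [](const auto& v) -> std::string {
        using V = std::decay_t<decltype(v)>;
        if constexpr (std::is_same_v<V, bool>) {
          return v ? "true" : "false";
        } else if constexpr (std::is_same_v<V, std::string>) {
          return v;
        } else {
          std::ostringstream os;  // numeric alternatives: int, int64_t, float
          os << v;
          return os.str();
        }
      },
      attr);
}

int main() {
  std::cout << AttrToString(Attr{true}) << " "
            << AttrToString(Attr{3}) << " "
            << AttrToString(Attr{2.5f}) << "\n";  // prints: true 3 2.5
  return 0;
}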
...@@ -104,7 +104,7 @@ static int ExtractOpRole(fusion_group::SubGraph* subgraph) { ...@@ -104,7 +104,7 @@ static int ExtractOpRole(fusion_group::SubGraph* subgraph) {
for (auto* n : subgraph->Nodes()) { for (auto* n : subgraph->Nodes()) {
if (n && n->IsOp() && n->Op()) { if (n && n->IsOp() && n->Op()) {
if (n->Op()->HasAttr(attr_name)) { if (n->Op()->HasAttr(attr_name)) {
op_roles.insert(BOOST_GET_CONST(int, n->Op()->GetAttr(attr_name))); op_roles.insert(PADDLE_GET_CONST(int, n->Op()->GetAttr(attr_name)));
} }
} }
} }
......
...@@ -272,10 +272,10 @@ void GpuCpuMapMatmul2MulPass::ApplyImpl(ir::Graph* graph) const { ...@@ -272,10 +272,10 @@ void GpuCpuMapMatmul2MulPass::ApplyImpl(ir::Graph* graph) const {
bool flag = true; bool flag = true;
bool transpose_X = bool transpose_X =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X"));
bool transpose_Y = bool transpose_Y =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y"));
float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha"));
flag = flag && !transpose_X && !transpose_Y && std::abs(alpha - 1.0) < 1e-5; flag = flag && !transpose_X && !transpose_Y && std::abs(alpha - 1.0) < 1e-5;
std::vector<int64_t> x_shape = matmul_in_x->Var()->GetShape(); std::vector<int64_t> x_shape = matmul_in_x->Var()->GetShape();
...@@ -346,9 +346,9 @@ void GpuCpuMapMatmulV2ToMulPass::ApplyImpl(ir::Graph* graph) const { ...@@ -346,9 +346,9 @@ void GpuCpuMapMatmulV2ToMulPass::ApplyImpl(ir::Graph* graph) const {
bool flag = true; bool flag = true;
bool trans_x = bool trans_x =
BOOST_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_x")); PADDLE_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_x"));
bool trans_y = bool trans_y =
BOOST_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_y")); PADDLE_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_y"));
flag = flag && !trans_x && !trans_y; flag = flag && !trans_x && !trans_y;
std::vector<int64_t> x_shape = matmul_v2_in_x->Var()->GetShape(); std::vector<int64_t> x_shape = matmul_v2_in_x->Var()->GetShape();
...@@ -494,16 +494,16 @@ void GpuCpuSqueeze2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -494,16 +494,16 @@ void GpuCpuSqueeze2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
size_t squeeze2_in_x_rank = (squeeze2_in_x->Var()->GetShape()).size(); size_t squeeze2_in_x_rank = (squeeze2_in_x->Var()->GetShape()).size();
std::vector<int> squeeze2_op_axes = std::vector<int> squeeze2_op_axes =
BOOST_GET_CONST(std::vector<int>, squeeze2_op->Op()->GetAttr("axes")); PADDLE_GET_CONST(std::vector<int>, squeeze2_op->Op()->GetAttr("axes"));
flag = flag && squeeze2_in_x_rank == 4 && flag = flag && squeeze2_in_x_rank == 4 &&
squeeze2_op_axes == std::vector<int>{2, 3} && squeeze2_op_axes == std::vector<int>{2, 3} &&
(matmul_in_x->outputs).size() == 1; (matmul_in_x->outputs).size() == 1;
bool transpose_X = bool transpose_X =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X"));
bool transpose_Y = bool transpose_Y =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y"));
float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha"));
size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size();
size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size();
flag = flag && !transpose_X && !transpose_Y && flag = flag && !transpose_X && !transpose_Y &&
...@@ -638,16 +638,16 @@ void GpuCpuReshape2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -638,16 +638,16 @@ void GpuCpuReshape2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
auto reshape2_in_x_shape = reshape2_in_x->Var()->GetShape(); auto reshape2_in_x_shape = reshape2_in_x->Var()->GetShape();
size_t reshape2_in_x_rank = reshape2_in_x_shape.size(); size_t reshape2_in_x_rank = reshape2_in_x_shape.size();
std::vector<int> reshape2_op_shape = std::vector<int> reshape2_op_shape =
BOOST_GET_CONST(std::vector<int>, reshape2_op->Op()->GetAttr("shape")); PADDLE_GET_CONST(std::vector<int>, reshape2_op->Op()->GetAttr("shape"));
flag = flag && reshape2_in_nums == 1 && reshape2_in_x_rank == 4 && flag = flag && reshape2_in_nums == 1 && reshape2_in_x_rank == 4 &&
reshape2_in_x_shape[2] == 1 && reshape2_in_x_shape[3] == 1 && reshape2_in_x_shape[2] == 1 && reshape2_in_x_shape[3] == 1 &&
reshape2_op_shape.size() == 2 && (matmul_in_x->outputs).size() == 1; reshape2_op_shape.size() == 2 && (matmul_in_x->outputs).size() == 1;
bool transpose_X = bool transpose_X =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X"));
bool transpose_Y = bool transpose_Y =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y"));
float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha"));
size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size();
size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size();
flag = flag && !transpose_X && !transpose_Y && flag = flag && !transpose_X && !transpose_Y &&
...@@ -720,7 +720,7 @@ void GpuCpuFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -720,7 +720,7 @@ void GpuCpuFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
auto flatten2_in_x_shape = flatten2_in_x->Var()->GetShape(); auto flatten2_in_x_shape = flatten2_in_x->Var()->GetShape();
size_t flatten2_in_x_rank = flatten2_in_x_shape.size(); size_t flatten2_in_x_rank = flatten2_in_x_shape.size();
int flatten2_axis = int flatten2_axis =
BOOST_GET_CONST(int, flatten2_op->Op()->GetAttr("axis")); PADDLE_GET_CONST(int, flatten2_op->Op()->GetAttr("axis"));
// only convert matmul to mul when the flatten2 has a single input // only convert matmul to mul when the flatten2 has a single input
// and the rank of input is 4 and the size of the output of matmul // and the rank of input is 4 and the size of the output of matmul
// is 1. // is 1.
...@@ -729,10 +729,10 @@ void GpuCpuFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -729,10 +729,10 @@ void GpuCpuFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
(matmul_in_x->outputs).size() == 1; (matmul_in_x->outputs).size() == 1;
bool transpose_X = bool transpose_X =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X"));
bool transpose_Y = bool transpose_Y =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y"));
float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha"));
size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size();
size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size();
pattern_found = pattern_found && !transpose_X && !transpose_Y && pattern_found = pattern_found && !transpose_X && !transpose_Y &&
......
...@@ -2799,7 +2799,7 @@ void patterns::ShuffleChannelPattern::operator()(PDNode *reshape1_in) { ...@@ -2799,7 +2799,7 @@ void patterns::ShuffleChannelPattern::operator()(PDNode *reshape1_in) {
auto reshape1_op = auto reshape1_op =
pattern->NewNode(reshape1_op_repr())->assert_is_op("reshape2"); pattern->NewNode(reshape1_op_repr())->assert_is_op("reshape2");
reshape1_op->assert_more([&](Node *x) { reshape1_op->assert_more([&](Node *x) {
return BOOST_GET_CONST(std::vector<int>, x->Op()->GetAttr("shape")) return PADDLE_GET_CONST(std::vector<int>, x->Op()->GetAttr("shape"))
.size() == 5; .size() == 5;
}); });
......
...@@ -163,7 +163,7 @@ struct PDNode { ...@@ -163,7 +163,7 @@ struct PDNode {
PDNode* assert_op_attr(const std::string& attr_name, const T& attr) { PDNode* assert_op_attr(const std::string& attr_name, const T& attr) {
asserts_.emplace_back([=](Node* x) { asserts_.emplace_back([=](Node* x) {
return x && x->IsOp() && x->Op()->HasAttr(attr_name) && return x && x->IsOp() && x->Op()->HasAttr(attr_name) &&
BOOST_GET_CONST(T, x->Op()->GetAttr(attr_name)) == attr; PADDLE_GET_CONST(T, x->Op()->GetAttr(attr_name)) == attr;
}); });
return this; return this;
} }
......
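Editor's note: assert_op_attr above builds a predicate that holds only when the attribute exists, stores the requested type, and equals the expected value. A standalone sketch of that predicate over a std::variant (names are illustrative, not PDNode's real machinery):

#include <cassert>
#include <string>
#include <variant>
#include <vector>

using Attr = std::variant<bool, int, std::string, std::vector<int>>;

template <typename T>
bool AttrEquals(const Attr& attr, const T& expected) {
  const T* held = std::get_if<T>(&attr);  // null if a different type is stored
  return held != nullptr && *held == expected;
}

int main() {
  Attr shape = std::vector<int>{0, -1, 5};
  assert(AttrEquals(shape, std::vector<int>{0, -1, 5}));
  assert(!AttrEquals(shape, std::string("reshape2")));  // wrong type, so false
  return 0;
}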
...@@ -33,7 +33,7 @@ std::string FormatName(const Node* node) { ...@@ -33,7 +33,7 @@ std::string FormatName(const Node* node) {
!node->Op()->HasAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName())) { !node->Op()->HasAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName())) {
return node->Name(); return node->Name();
} }
const std::string full_scope = BOOST_GET_CONST( const std::string full_scope = PADDLE_GET_CONST(
std::string, std::string,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName())); node->Op()->GetAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName()));
return string::Sprintf("%s%s", full_scope.c_str(), node->Name().c_str()); return string::Sprintf("%s%s", full_scope.c_str(), node->Name().c_str());
......
...@@ -45,8 +45,8 @@ void DeleteScaleOpPass::ApplyImpl(ir::Graph* graph) const { ...@@ -45,8 +45,8 @@ void DeleteScaleOpPass::ApplyImpl(ir::Graph* graph) const {
auto input_var_node = node->inputs[0]; auto input_var_node = node->inputs[0];
auto output_var_node = node->outputs[0]; auto output_var_node = node->outputs[0];
// only optimize scale *1 + 0 // only optimize scale *1 + 0
auto scale = BOOST_GET_CONST(float, op->GetAttr("scale")); auto scale = PADDLE_GET_CONST(float, op->GetAttr("scale"));
auto bias = BOOST_GET_CONST(float, op->GetAttr("bias")); auto bias = PADDLE_GET_CONST(float, op->GetAttr("bias"));
if (scale != 1 || bias != 0) { if (scale != 1 || bias != 0) {
return; return;
} }
......
...@@ -36,7 +36,7 @@ void ForwardGraphExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -36,7 +36,7 @@ void ForwardGraphExtractPass::ApplyImpl(ir::Graph* graph) const {
if (!node->IsOp()) { if (!node->IsOp()) {
continue; continue;
} }
auto op_role = BOOST_GET_MUTABLE(int, node->Op()->GetAttr("op_role")); auto op_role = PADDLE_GET_MUTABLE(int, node->Op()->GetAttr("op_role"));
if (op_role == static_cast<int>(OpRole::kForward)) { if (op_role == static_cast<int>(OpRole::kForward)) {
all_ops[OpRole::kForward].insert(node); all_ops[OpRole::kForward].insert(node);
} else if (op_role == static_cast<int>(OpRole::kBackward)) { } else if (op_role == static_cast<int>(OpRole::kBackward)) {
......
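Editor's note: this hunk is one of the few that uses PADDLE_GET_MUTABLE rather than PADDLE_GET_CONST. Assuming the three spellings differ only in how the stored alternative is handed back (read-only reference, copy, writable reference) — an assumption made for illustration, not something this diff states — a standalone std::variant analogue looks like:

#include <cassert>
#include <variant>

using Attr = std::variant<bool, int, float>;

template <typename T>
const T& GetConst(const Attr& a) { return std::get<T>(a); }  // read-only view

template <typename T>
T Get(const Attr& a) { return std::get<T>(a); }              // by-value copy

template <typename T>
T& GetMutable(Attr& a) { return std::get<T>(a); }            // writable reference

int main() {
  Attr op_role = 0;
  GetMutable<int>(op_role) = 1;  // in-place update through the variant
  assert(GetConst<int>(op_role) == 1);
  assert(Get<int>(op_role) == 1);
  return 0;
}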
...@@ -74,7 +74,8 @@ void InferenceDtypeTransferPass::ApplyImpl(ir::Graph* graph) const { ...@@ -74,7 +74,8 @@ void InferenceDtypeTransferPass::ApplyImpl(ir::Graph* graph) const {
auto* op_desc = node->Op(); auto* op_desc = node->Op();
if (op_desc->Type() == "popart_cast") { if (op_desc->Type() == "popart_cast") {
// Transfer the target dtype of cast Op // Transfer the target dtype of cast Op
if (BOOST_GET_CONST(std::string, op_desc->GetAttr("to")) == "FLOAT") { if (PADDLE_GET_CONST(std::string, op_desc->GetAttr("to")) ==
"FLOAT") {
op_desc->SetAttr("to", std::string("FLOAT16")); op_desc->SetAttr("to", std::string("FLOAT16"));
op_desc->Flush(); op_desc->Flush();
} }
......
...@@ -112,12 +112,12 @@ void InferenceProcessPass::ApplyImpl(ir::Graph* graph) const { ...@@ -112,12 +112,12 @@ void InferenceProcessPass::ApplyImpl(ir::Graph* graph) const {
for (auto node : graph->Nodes()) { for (auto node : graph->Nodes()) {
if (node->Name() == "feed") { if (node->Name() == "feed") {
if (node->IsOp()) { if (node->IsOp()) {
feed_list[BOOST_GET_CONST(int, node->Op()->GetAttr("col"))] = feed_list[PADDLE_GET_CONST(int, node->Op()->GetAttr("col"))] =
node->outputs[0]->Name(); node->outputs[0]->Name();
} }
} else if (node->Name() == "fetch") { } else if (node->Name() == "fetch") {
if (node->IsOp()) { if (node->IsOp()) {
fetch_list[BOOST_GET_CONST(int, node->Op()->GetAttr("col"))] = fetch_list[PADDLE_GET_CONST(int, node->Op()->GetAttr("col"))] =
node->inputs[0]->Name(); node->inputs[0]->Name();
} }
} }
......
...@@ -76,7 +76,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -76,7 +76,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
auto op = node->Op(); auto op = node->Op();
auto op_type = op->Type(); auto op_type = op->Type();
int op_role_ = BOOST_GET_CONST( int op_role_ = PADDLE_GET_CONST(
int, op->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); int, op->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName()));
auto op_role = static_cast<OpRole>(op_role_); auto op_role = static_cast<OpRole>(op_role_);
...@@ -84,7 +84,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -84,7 +84,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
// save weight decay value from every lamb optimizer op // save weight decay value from every lamb optimizer op
if (op_type == "lamb" && op->HasAttr("weight_decay")) { if (op_type == "lamb" && op->HasAttr("weight_decay")) {
auto weight_decay_value = auto weight_decay_value =
BOOST_GET_CONST(float, op->GetAttr("weight_decay")); PADDLE_GET_CONST(float, op->GetAttr("weight_decay"));
auto params = op->Output("ParamOut"); auto params = op->Output("ParamOut");
weight_decay_vars.push_back(params[0]); weight_decay_vars.push_back(params[0]);
weight_decay_values.push_back(weight_decay_value); weight_decay_values.push_back(weight_decay_value);
...@@ -95,7 +95,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -95,7 +95,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
} }
auto op_namescope = auto op_namescope =
BOOST_GET_CONST(std::string, op->GetAttr("op_namescope")); PADDLE_GET_CONST(std::string, op->GetAttr("op_namescope"));
bool is_grad_clip = is_grad_clip_op(op_namescope); bool is_grad_clip = is_grad_clip_op(op_namescope);
// bool is_optimizer = is_optimizer_op(op_namescope); // bool is_optimizer = is_optimizer_op(op_namescope);
bool is_regularization = is_regularization_op(op_namescope); bool is_regularization = is_regularization_op(op_namescope);
...@@ -114,32 +114,33 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -114,32 +114,33 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
} else if (op_type == "momentum") { } else if (op_type == "momentum") {
auto type = std::string{"sgd"}; auto type = std::string{"sgd"};
// auto LearningRate = op->Input("LearningRate"); // auto LearningRate = op->Input("LearningRate");
auto use_nesterov = BOOST_GET_CONST(bool, op->GetAttr("use_nesterov")); auto use_nesterov = PADDLE_GET_CONST(bool, op->GetAttr("use_nesterov"));
PADDLE_ENFORCE_EQ(use_nesterov, PADDLE_ENFORCE_EQ(use_nesterov,
false, false,
platform::errors::Unimplemented( platform::errors::Unimplemented(
"ipu does not support nesterov mode.")); "ipu does not support nesterov mode."));
auto regularization_method = auto regularization_method =
BOOST_GET_CONST(std::string, op->GetAttr("regularization_method")); PADDLE_GET_CONST(std::string, op->GetAttr("regularization_method"));
PADDLE_ENFORCE_NE(regularization_method, PADDLE_ENFORCE_NE(regularization_method,
"l1_decay", "l1_decay",
platform::errors::Unimplemented( platform::errors::Unimplemented(
"ipu does not support l1_decay mode.")); "ipu does not support l1_decay mode."));
auto multi_precision = auto multi_precision =
BOOST_GET_CONST(bool, op->GetAttr("multi_precision")); PADDLE_GET_CONST(bool, op->GetAttr("multi_precision"));
PADDLE_ENFORCE_EQ(multi_precision, PADDLE_ENFORCE_EQ(multi_precision,
false, false,
platform::errors::Unimplemented( platform::errors::Unimplemented(
"ipu does not support multi_precision mode.")); "ipu does not support multi_precision mode."));
auto rescale_grad = BOOST_GET_CONST(float, op->GetAttr("rescale_grad")); auto rescale_grad =
PADDLE_GET_CONST(float, op->GetAttr("rescale_grad"));
PADDLE_ENFORCE_EQ(rescale_grad, PADDLE_ENFORCE_EQ(rescale_grad,
1.0, 1.0,
platform::errors::Unimplemented( platform::errors::Unimplemented(
"ipu does not support rescale_grad mode.")); "ipu does not support rescale_grad mode."));
auto regularization_coeff = auto regularization_coeff =
BOOST_GET_CONST(float, op->GetAttr("regularization_coeff")); PADDLE_GET_CONST(float, op->GetAttr("regularization_coeff"));
auto lr_var = op->Input("LearningRate").front(); auto lr_var = op->Input("LearningRate").front();
auto momentum = BOOST_GET_CONST(float, op->GetAttr("mu")); auto momentum = PADDLE_GET_CONST(float, op->GetAttr("mu"));
new_op.SetAttr("type", type); new_op.SetAttr("type", type);
new_op.SetAttr("lr_var", lr_var); new_op.SetAttr("lr_var", lr_var);
new_op.SetAttr("momentum", momentum); new_op.SetAttr("momentum", momentum);
...@@ -148,12 +149,12 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -148,12 +149,12 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
} else if (op_type == "adam" || op_type == "adamw") { } else if (op_type == "adam" || op_type == "adamw") {
auto type = std::string{"adam"}; auto type = std::string{"adam"};
auto lr_var = op->Input("LearningRate").front(); auto lr_var = op->Input("LearningRate").front();
auto beta1 = BOOST_GET_CONST(float, op->GetAttr("beta1")); auto beta1 = PADDLE_GET_CONST(float, op->GetAttr("beta1"));
auto beta2 = BOOST_GET_CONST(float, op->GetAttr("beta2")); auto beta2 = PADDLE_GET_CONST(float, op->GetAttr("beta2"));
auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon"));
auto lazy_mode = BOOST_GET_CONST(bool, op->GetAttr("lazy_mode")); auto lazy_mode = PADDLE_GET_CONST(bool, op->GetAttr("lazy_mode"));
auto multi_precision = auto multi_precision =
BOOST_GET_CONST(bool, op->GetAttr("multi_precision")); PADDLE_GET_CONST(bool, op->GetAttr("multi_precision"));
PADDLE_ENFORCE_EQ(lazy_mode, PADDLE_ENFORCE_EQ(lazy_mode,
false, false,
platform::errors::Unimplemented( platform::errors::Unimplemented(
...@@ -180,9 +181,9 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -180,9 +181,9 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
} else if (op_type == "adamax") { } else if (op_type == "adamax") {
auto type = std::string{"adam"}; auto type = std::string{"adam"};
auto lr_var = op->Input("LearningRate").front(); auto lr_var = op->Input("LearningRate").front();
auto beta1 = BOOST_GET_CONST(float, op->GetAttr("beta1")); auto beta1 = PADDLE_GET_CONST(float, op->GetAttr("beta1"));
auto beta2 = BOOST_GET_CONST(float, op->GetAttr("beta2")); auto beta2 = PADDLE_GET_CONST(float, op->GetAttr("beta2"));
auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon"));
new_op.SetAttr("type", type); new_op.SetAttr("type", type);
new_op.SetAttr("lr_var", lr_var); new_op.SetAttr("lr_var", lr_var);
new_op.SetAttr("weight_decay", 0.0f); new_op.SetAttr("weight_decay", 0.0f);
...@@ -196,10 +197,11 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -196,10 +197,11 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
// use decay mode // use decay mode
auto type = std::string{"adam"}; auto type = std::string{"adam"};
auto lr_var = op->Input("LearningRate").front(); auto lr_var = op->Input("LearningRate").front();
auto weight_decay = BOOST_GET_CONST(float, op->GetAttr("weight_decay")); auto weight_decay =
auto beta1 = BOOST_GET_CONST(float, op->GetAttr("beta1")); PADDLE_GET_CONST(float, op->GetAttr("weight_decay"));
auto beta2 = BOOST_GET_CONST(float, op->GetAttr("beta2")); auto beta1 = PADDLE_GET_CONST(float, op->GetAttr("beta1"));
auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); auto beta2 = PADDLE_GET_CONST(float, op->GetAttr("beta2"));
auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon"));
new_op.SetAttr("type", type); new_op.SetAttr("type", type);
new_op.SetAttr("lr_var", lr_var); new_op.SetAttr("lr_var", lr_var);
new_op.SetAttr("weight_decay", weight_decay); new_op.SetAttr("weight_decay", weight_decay);
...@@ -212,8 +214,8 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -212,8 +214,8 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
} else if (op_type == "adadelta") { } else if (op_type == "adadelta") {
// NO LearningRate // NO LearningRate
auto type = std::string{"adaptive"}; auto type = std::string{"adaptive"};
auto rho = BOOST_GET_CONST(float, op->GetAttr("rho")); auto rho = PADDLE_GET_CONST(float, op->GetAttr("rho"));
auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon"));
new_op.SetAttr("type", type); new_op.SetAttr("type", type);
new_op.SetAttr("weight_decay", 0.0f); new_op.SetAttr("weight_decay", 0.0f);
new_op.SetAttr("alpha", rho); new_op.SetAttr("alpha", rho);
...@@ -225,7 +227,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -225,7 +227,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
} else if (op_type == "adagrad") { } else if (op_type == "adagrad") {
auto type = std::string{"adaptive"}; auto type = std::string{"adaptive"};
auto lr_var = op->Input("LearningRate").front(); auto lr_var = op->Input("LearningRate").front();
auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon"));
new_op.SetAttr("type", type); new_op.SetAttr("type", type);
new_op.SetAttr("lr_var", lr_var); new_op.SetAttr("lr_var", lr_var);
new_op.SetAttr("weight_decay", 0.0f); new_op.SetAttr("weight_decay", 0.0f);
...@@ -239,10 +241,10 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -239,10 +241,10 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
} else if (op_type == "rmsprop") { } else if (op_type == "rmsprop") {
auto type = std::string{"adaptive"}; auto type = std::string{"adaptive"};
auto lr_var = op->Input("LearningRate").front(); auto lr_var = op->Input("LearningRate").front();
auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon"));
auto decay = BOOST_GET_CONST(float, op->GetAttr("decay")); auto decay = PADDLE_GET_CONST(float, op->GetAttr("decay"));
auto momentum = BOOST_GET_CONST(float, op->GetAttr("momentum")); auto momentum = PADDLE_GET_CONST(float, op->GetAttr("momentum"));
auto centered = BOOST_GET_CONST(bool, op->GetAttr("centered")); auto centered = PADDLE_GET_CONST(bool, op->GetAttr("centered"));
new_op.SetAttr("type", type); new_op.SetAttr("type", type);
new_op.SetAttr("weight_decay", 0.0f); new_op.SetAttr("weight_decay", 0.0f);
new_op.SetAttr("alpha", decay); new_op.SetAttr("alpha", decay);
...@@ -258,11 +260,11 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { ...@@ -258,11 +260,11 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
} }
} else if (is_regularization && op_type == "scale") { } else if (is_regularization && op_type == "scale") {
// set weight_decay for L2Decay // set weight_decay for L2Decay
auto scale = BOOST_GET_CONST(float, op->GetAttr("scale")); auto scale = PADDLE_GET_CONST(float, op->GetAttr("scale"));
new_op.SetAttr("weight_decay", scale); new_op.SetAttr("weight_decay", scale);
} else if (is_grad_clip && op_type == "fill_constant") { } else if (is_grad_clip && op_type == "fill_constant") {
// set clip_norm for ClipGradByGlobalNorm // set clip_norm for ClipGradByGlobalNorm
auto value = BOOST_GET_CONST(float, op->GetAttr("value")); auto value = PADDLE_GET_CONST(float, op->GetAttr("value"));
new_op.SetAttr("clip_norm", value); new_op.SetAttr("clip_norm", value);
} else if (ignored_ops.count(op_type)) { } else if (ignored_ops.count(op_type)) {
VLOG(10) << "Ignore optimizer releated op: " << op_type; VLOG(10) << "Ignore optimizer releated op: " << op_type;
......
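Editor's note: IpuOptimizerExtractPass above reads a handful of typed attributes per optimizer op and rewrites them into a normalized op description via new_op.SetAttr(...). A compressed standalone sketch of that dispatch, with invented names and only two of the branches, just to show the shape of the translation:

#include <iostream>
#include <map>
#include <string>
#include <variant>

using Attr = std::variant<bool, float, std::string>;
using AttrMap = std::map<std::string, Attr>;

struct NormalizedOpt {  // illustrative stand-in for the pass's new_op attributes
  std::string type;
  float alpha = 0.f;
  float eps = 0.f;
};

NormalizedOpt Normalize(const std::string& op_type, const AttrMap& attrs) {
  NormalizedOpt out;
  if (op_type == "rmsprop") {
    out.type = "adaptive";
    out.alpha = std::get<float>(attrs.at("decay"));
    out.eps = std::get<float>(attrs.at("epsilon"));
  } else if (op_type == "adagrad") {
    out.type = "adaptive";
    out.eps = std::get<float>(attrs.at("epsilon"));
  } else {
    out.type = "unhandled";
  }
  return out;
}

int main() {
  AttrMap attrs = {{"decay", 0.9f}, {"epsilon", 1e-6f}};
  NormalizedOpt o = Normalize("rmsprop", attrs);
  std::cout << o.type << " " << o.alpha << " " << o.eps << "\n";  // adaptive 0.9 1e-06
  return 0;
}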
...@@ -32,7 +32,7 @@ void IpuOptimizerStateAlignPass::ApplyImpl(ir::Graph* graph) const { ...@@ -32,7 +32,7 @@ void IpuOptimizerStateAlignPass::ApplyImpl(ir::Graph* graph) const {
for (auto* node : graph->Nodes()) { for (auto* node : graph->Nodes()) {
if (node->IsOp() && node->Op()) { if (node->IsOp() && node->Op()) {
int op_role = BOOST_GET_CONST( int op_role = PADDLE_GET_CONST(
int, int,
node->Op()->GetAttr( node->Op()->GetAttr(
framework::OpProtoAndCheckerMaker::OpRoleAttrName())); framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
...@@ -42,7 +42,7 @@ void IpuOptimizerStateAlignPass::ApplyImpl(ir::Graph* graph) const { ...@@ -42,7 +42,7 @@ void IpuOptimizerStateAlignPass::ApplyImpl(ir::Graph* graph) const {
if (inputs.count(platform::ipu::sBeta1Pow)) { if (inputs.count(platform::ipu::sBeta1Pow)) {
auto var = scope_->GetVar(inputs.at(platform::ipu::sBeta1Pow)[0]); auto var = scope_->GetVar(inputs.at(platform::ipu::sBeta1Pow)[0]);
auto data = var->GetMutable<framework::LoDTensor>()->data<float>(); auto data = var->GetMutable<framework::LoDTensor>()->data<float>();
auto beta = BOOST_GET_CONST( auto beta = PADDLE_GET_CONST(
float, node->Op()->GetAttr(platform::ipu::sBeta1)); float, node->Op()->GetAttr(platform::ipu::sBeta1));
// ensure current save with beta1pow, rather than step. // ensure current save with beta1pow, rather than step.
......
...@@ -159,12 +159,12 @@ TEST(IsTestPass, basic) { ...@@ -159,12 +159,12 @@ TEST(IsTestPass, basic) {
for (auto* node : graph->Nodes()) { for (auto* node : graph->Nodes()) {
if (node->IsOp()) { if (node->IsOp()) {
auto* op = node->Op(); auto* op = node->Op();
auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name"));
if (op_name == "conv3") { if (op_name == "conv3") {
ASSERT_FALSE(op->HasAttr("is_test")); ASSERT_FALSE(op->HasAttr("is_test"));
} else { } else {
ASSERT_TRUE(op->HasAttr("is_test")); ASSERT_TRUE(op->HasAttr("is_test"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("is_test"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("is_test")));
} }
} }
} }
......
...@@ -57,14 +57,14 @@ bool validateReduceOpAttrs(const Node* node, ...@@ -57,14 +57,14 @@ bool validateReduceOpAttrs(const Node* node,
const auto* op = node->Op(); const auto* op = node->Op();
if (op->HasAttr("reduce_all")) { if (op->HasAttr("reduce_all")) {
EXPECT_TRUE( EXPECT_TRUE(
!BOOST_GET_CONST(bool, op->GetAttr("reduce_all")), !PADDLE_GET_CONST(bool, op->GetAttr("reduce_all")),
::paddle::string::Sprintf( ::paddle::string::Sprintf(
"The LayerNorm fusion %s" "The LayerNorm fusion %s"
"reduction must have \'reduce_all\' attribute set to false.", "reduction must have \'reduce_all\' attribute set to false.",
name)); name));
} }
if (op->HasAttr("dim")) { if (op->HasAttr("dim")) {
auto dims = BOOST_GET_CONST(std::vector<int>, op->GetAttr("dim")); auto dims = PADDLE_GET_CONST(std::vector<int>, op->GetAttr("dim"));
if (dims.size() == x_shape.size()) return false; if (dims.size() == x_shape.size()) return false;
if (1 == dims.size() && -1 == dims.front()) return true; if (1 == dims.size() && -1 == dims.front()) return true;
...@@ -289,18 +289,18 @@ void LayerNormFusePass::ApplyImpl(Graph* graph) const { ...@@ -289,18 +289,18 @@ void LayerNormFusePass::ApplyImpl(Graph* graph) const {
CHECK_TRUE(validateReduceOpAttrs(std_dev, x_shape, "std_dev mean"), CHECK_TRUE(validateReduceOpAttrs(std_dev, x_shape, "std_dev mean"),
"Validation of standard deviation node failed."); "Validation of standard deviation node failed.");
bool keep_dim = BOOST_GET_CONST(bool, x_mean->Op()->GetAttr("keep_dim")); bool keep_dim = PADDLE_GET_CONST(bool, x_mean->Op()->GetAttr("keep_dim"));
std::vector<int> mean_dim = std::vector<int> mean_dim =
BOOST_GET_CONST(std::vector<int>, x_mean->Op()->GetAttr("dim")); PADDLE_GET_CONST(std::vector<int>, x_mean->Op()->GetAttr("dim"));
std::vector<int> std_mean_dim = std::vector<int> std_mean_dim =
BOOST_GET_CONST(std::vector<int>, std_dev->Op()->GetAttr("dim")); PADDLE_GET_CONST(std::vector<int>, std_dev->Op()->GetAttr("dim"));
if (mean_dim != std_mean_dim) { if (mean_dim != std_mean_dim) {
LOG(WARNING) << "The LayerNorm dim of all mean must be same"; LOG(WARNING) << "The LayerNorm dim of all mean must be same";
return; return;
} }
if (!keep_dim) { if (!keep_dim) {
int sub_axis = BOOST_GET_CONST(int, x_sub_mean->Op()->GetAttr("axis")); int sub_axis = PADDLE_GET_CONST(int, x_sub_mean->Op()->GetAttr("axis"));
int div_axis = BOOST_GET_CONST(int, division->Op()->GetAttr("axis")); int div_axis = PADDLE_GET_CONST(int, division->Op()->GetAttr("axis"));
if (sub_axis != 0 || div_axis != 0) return; if (sub_axis != 0 || div_axis != 0) return;
} }
......
...@@ -200,7 +200,7 @@ ir::Node* LockFreeOptimizePass::CreateNewSGDNode( ...@@ -200,7 +200,7 @@ ir::Node* LockFreeOptimizePass::CreateNewSGDNode(
new_desc.SetInput("Grad", std::vector<std::string>({grad_node->Name()})); new_desc.SetInput("Grad", std::vector<std::string>({grad_node->Name()}));
new_desc.SetOutput("ParamOut", old_desc->Output("ParamOut")); new_desc.SetOutput("ParamOut", old_desc->Output("ParamOut"));
std::vector<std::string> op_role_vars = BOOST_GET_CONST( std::vector<std::string> op_role_vars = PADDLE_GET_CONST(
std::vector<std::string>, std::vector<std::string>,
new_desc.GetAttr(framework::OpProtoAndCheckerMaker::OpRoleVarAttrName())); new_desc.GetAttr(framework::OpProtoAndCheckerMaker::OpRoleVarAttrName()));
// replace the second op role var, because the grad name was // replace the second op role var, because the grad name was
......
...@@ -131,16 +131,16 @@ void MatmulScaleFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -131,16 +131,16 @@ void MatmulScaleFusePass::ApplyImpl(ir::Graph* graph) const {
GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, matmul_scale_pattern); GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, matmul_scale_pattern);
auto* scope = param_scope(); auto* scope = param_scope();
float bias = BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")); float bias = PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias"));
if (std::abs(bias) > 1e-5) return; if (std::abs(bias) > 1e-5) return;
if (!IsCompat(subgraph, g)) { if (!IsCompat(subgraph, g)) {
LOG(WARNING) << "matmul_scale_fuse_pass in op compat failed."; LOG(WARNING) << "matmul_scale_fuse_pass in op compat failed.";
return; return;
} }
float scale = BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")); float scale = PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale"));
float matmul_alpha = float matmul_alpha =
BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha"));
auto const& names = scale_op->Op()->InputNames(); auto const& names = scale_op->Op()->InputNames();
bool has_scale_tensor = bool has_scale_tensor =
std::find(names.begin(), names.end(), "ScaleTensor") != names.end(); std::find(names.begin(), names.end(), "ScaleTensor") != names.end();
...@@ -195,14 +195,14 @@ void MatmulV2ScaleFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -195,14 +195,14 @@ void MatmulV2ScaleFusePass::ApplyImpl(ir::Graph* graph) const {
GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, matmul_v2_scale_pattern); GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, matmul_v2_scale_pattern);
auto* scope = param_scope(); auto* scope = param_scope();
float bias = BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")); float bias = PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias"));
if (std::abs(bias) > 1e-5) return; if (std::abs(bias) > 1e-5) return;
if (!IsCompat(subgraph, g)) { if (!IsCompat(subgraph, g)) {
LOG(WARNING) << "matmul_v2_scale_fuse_pass in op compat failed."; LOG(WARNING) << "matmul_v2_scale_fuse_pass in op compat failed.";
return; return;
} }
float scale = BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")); float scale = PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale"));
auto const& names = scale_op->Op()->InputNames(); auto const& names = scale_op->Op()->InputNames();
bool has_scale_tensor = bool has_scale_tensor =
std::find(names.begin(), names.end(), "ScaleTensor") != names.end(); std::find(names.begin(), names.end(), "ScaleTensor") != names.end();
......
...@@ -208,14 +208,14 @@ TEST(test_reference_count_pass, test_no_need_buffer_var_shrink) { ...@@ -208,14 +208,14 @@ TEST(test_reference_count_pass, test_no_need_buffer_var_shrink) {
for (auto use_cuda : use_cuda_list) { for (auto use_cuda : use_cuda_list) {
ReferenceCountPassTestHelper helper(program, use_cuda); ReferenceCountPassTestHelper helper(program, use_cuda);
ASSERT_TRUE(helper.IsLastLivedOps(x0, {"scale"})); ASSERT_TRUE(helper.IsLastLivedOps(x0, {"scale"}));
ASSERT_EQ( ASSERT_EQ(PADDLE_GET_CONST(float,
BOOST_GET_CONST(float, helper.LastLivedOps(x0)[0]->Attrs().at("scale")), helper.LastLivedOps(x0)[0]->Attrs().at("scale")),
1.0f); 1.0f);
ASSERT_TRUE(helper.IsLastLivedOps(x1, {"scale"})); ASSERT_TRUE(helper.IsLastLivedOps(x1, {"scale"}));
ASSERT_EQ( ASSERT_EQ(PADDLE_GET_CONST(float,
BOOST_GET_CONST(float, helper.LastLivedOps(x1)[0]->Attrs().at("scale")), helper.LastLivedOps(x1)[0]->Attrs().at("scale")),
3.0f); 3.0f);
ASSERT_TRUE(helper.IsLastLivedOps(x2, {"elementwise_mul"})); ASSERT_TRUE(helper.IsLastLivedOps(x2, {"elementwise_mul"}));
ASSERT_TRUE(helper.IsLastLivedOps(x3, {"elementwise_add_grad"})); ASSERT_TRUE(helper.IsLastLivedOps(x3, {"elementwise_add_grad"}));
......
...@@ -112,7 +112,7 @@ void FuseBatchNormActOneDNNPass::FuseBatchNormAct( ...@@ -112,7 +112,7 @@ void FuseBatchNormActOneDNNPass::FuseBatchNormAct(
auto *bn_op = batch_norm->Op(); auto *bn_op = batch_norm->Op();
if (bn_op->HasAttr("trainable_statistics")) { if (bn_op->HasAttr("trainable_statistics")) {
PADDLE_ENFORCE( PADDLE_ENFORCE(
!BOOST_GET_CONST(bool, bn_op->GetAttr("trainable_statistics")), !PADDLE_GET_CONST(bool, bn_op->GetAttr("trainable_statistics")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"The BatchNorm+Act fusion may happen only when mean and variance " "The BatchNorm+Act fusion may happen only when mean and variance "
"are not calculated by current batch statistics.")); "are not calculated by current batch statistics."));
...@@ -120,7 +120,7 @@ void FuseBatchNormActOneDNNPass::FuseBatchNormAct( ...@@ -120,7 +120,7 @@ void FuseBatchNormActOneDNNPass::FuseBatchNormAct(
if (bn_op->HasAttr("is_test")) { if (bn_op->HasAttr("is_test")) {
PADDLE_ENFORCE( PADDLE_ENFORCE(
BOOST_GET_CONST(bool, bn_op->GetAttr("is_test")), PADDLE_GET_CONST(bool, bn_op->GetAttr("is_test")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"The BatchNorm+Act fusion may happen only during inference.")); "The BatchNorm+Act fusion may happen only during inference."));
} }
......
...@@ -101,11 +101,11 @@ TEST(FuseBatchNormActOneDNNPass, FuseIsTest) { ...@@ -101,11 +101,11 @@ TEST(FuseBatchNormActOneDNNPass, FuseIsTest) {
if (node->IsOp() && node->Op()->Type() == "batch_norm") { if (node->IsOp() && node->Op()->Type() == "batch_norm") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
ASSERT_TRUE(op->HasAttr("fuse_with_relu")); ASSERT_TRUE(op->HasAttr("fuse_with_relu"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("fuse_with_relu"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("fuse_with_relu")));
ASSERT_TRUE(op->HasAttr("trainable_statistics")); ASSERT_TRUE(op->HasAttr("trainable_statistics"));
EXPECT_FALSE(BOOST_GET_CONST(bool, op->GetAttr("trainable_statistics"))); EXPECT_FALSE(PADDLE_GET_CONST(bool, op->GetAttr("trainable_statistics")));
} }
} }
} }
......
...@@ -347,7 +347,7 @@ void ComputePropagateScalesMkldnnPass::UpdateScaleOpInScale( ...@@ -347,7 +347,7 @@ void ComputePropagateScalesMkldnnPass::UpdateScaleOpInScale(
auto pair = iter->second; auto pair = iter->second;
const auto tensor = pair.second; const auto tensor = pair.second;
const auto scale = BOOST_GET_CONST(float, op_node->Op()->GetAttr("scale")); const auto scale = PADDLE_GET_CONST(float, op_node->Op()->GetAttr("scale"));
Tensor tmp_tensor; Tensor tmp_tensor;
tmp_tensor.Resize(tensor.dims()); tmp_tensor.Resize(tensor.dims());
auto* data = tmp_tensor.mutable_data<float>(platform::CPUPlace()); auto* data = tmp_tensor.mutable_data<float>(platform::CPUPlace());
......
...@@ -72,9 +72,10 @@ void ConvActivationMkldnnFusePass::FuseConvAct(Graph* graph, ...@@ -72,9 +72,10 @@ void ConvActivationMkldnnFusePass::FuseConvAct(Graph* graph,
} }
if (act_type == "gelu" && activation->Op()->HasAttr("approximate")) { if (act_type == "gelu" && activation->Op()->HasAttr("approximate")) {
act_type = BOOST_GET_CONST(bool, activation->Op()->GetAttr("approximate")) act_type =
? "gelu_tanh" PADDLE_GET_CONST(bool, activation->Op()->GetAttr("approximate"))
: "gelu_erf"; ? "gelu_tanh"
: "gelu_erf";
conv_op->SetAttr("fuse_alpha", 0.0f); conv_op->SetAttr("fuse_alpha", 0.0f);
conv_op->SetAttr("fuse_beta", 0.0f); conv_op->SetAttr("fuse_beta", 0.0f);
} }
......
...@@ -166,8 +166,8 @@ void MainTest(std::string activation) { ...@@ -166,8 +166,8 @@ void MainTest(std::string activation) {
if (node->IsOp() && node->Op()->Type() == "conv2d") { if (node->IsOp() && node->Op()->Type() == "conv2d") {
auto* op = node->Op(); auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name"));
if (op->GetAttrIfExists<std::string>("fuse_activation") == activation) { if (op->GetAttrIfExists<std::string>("fuse_activation") == activation) {
++conv_activation_count; ++conv_activation_count;
} }
......
...@@ -142,9 +142,9 @@ void MainTest(bool convWithExistingBias) { ...@@ -142,9 +142,9 @@ void MainTest(bool convWithExistingBias) {
if (node->IsOp() && node->Op()->Type() == "conv2d") { if (node->IsOp() && node->Op()->Type() == "conv2d") {
auto* op = node->Op(); auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
// check if "conv" convolution is fused // check if "conv" convolution is fused
auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name"));
if (op_name == "conv") { if (op_name == "conv") {
auto input_names = op->InputNames(); auto input_names = op->InputNames();
ASSERT_TRUE(std::find(input_names.begin(), input_names.end(), "Bias") != ASSERT_TRUE(std::find(input_names.begin(), input_names.end(), "Bias") !=
......
...@@ -122,7 +122,7 @@ void MainTest(const ProgramDesc& prog, bool fuse_relu) { ...@@ -122,7 +122,7 @@ void MainTest(const ProgramDesc& prog, bool fuse_relu) {
if (op->Type() == "conv2d") { if (op->Type() == "conv2d") {
ASSERT_TRUE(op->HasAttr("fuse_activation")); ASSERT_TRUE(op->HasAttr("fuse_activation"));
bool fuse_relu_attr = bool fuse_relu_attr =
(BOOST_GET_CONST(std::string, op->GetAttr("fuse_activation")) == (PADDLE_GET_CONST(std::string, op->GetAttr("fuse_activation")) ==
"relu"); "relu");
EXPECT_EQ(fuse_relu, fuse_relu_attr); EXPECT_EQ(fuse_relu, fuse_relu_attr);
} else if (op->Type() == "relu") { } else if (op->Type() == "relu") {
......
...@@ -449,9 +449,9 @@ void CPUQuantizePass::QuantizeConv(Graph* graph, ...@@ -449,9 +449,9 @@ void CPUQuantizePass::QuantizeConv(Graph* graph,
if (conv_op->Op()->GetAttrIfExists<std::string>("fuse_activation") == if (conv_op->Op()->GetAttrIfExists<std::string>("fuse_activation") ==
"relu6") { "relu6") {
float scale_out = float scale_out =
BOOST_GET_CONST(float, conv_op->Op()->GetAttr("Scale_out")); PADDLE_GET_CONST(float, conv_op->Op()->GetAttr("Scale_out"));
float threshold = float threshold =
BOOST_GET_CONST(float, conv_op->Op()->GetAttr("fuse_alpha")); PADDLE_GET_CONST(float, conv_op->Op()->GetAttr("fuse_alpha"));
conv_op->Op()->SetAttr("fuse_alpha", scale_out * threshold); conv_op->Op()->SetAttr("fuse_alpha", scale_out * threshold);
} }
......
...@@ -798,14 +798,15 @@ void MainTestMultiGru(int layers) { ...@@ -798,14 +798,15 @@ void MainTestMultiGru(int layers) {
if (op->Type() == "multi_gru") { if (op->Type() == "multi_gru") {
multi_gru_nodes_count++; multi_gru_nodes_count++;
auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name"));
EXPECT_EQ(BOOST_GET_CONST(float, op->GetAttr("Scale_data")), scale) EXPECT_EQ(PADDLE_GET_CONST(float, op->GetAttr("Scale_data")), scale)
<< "Scale_data for node '" + op_name + "'."; << "Scale_data for node '" + op_name + "'.";
EXPECT_EQ(BOOST_GET_CONST(float, op->GetAttr("Shift_data")), shift) EXPECT_EQ(PADDLE_GET_CONST(float, op->GetAttr("Shift_data")), shift)
<< "Shift_data for node '" + op_name + "'."; << "Shift_data for node '" + op_name + "'.";
EXPECT_EQ(op->Input("Scale_weights").size(), 2u * layers) EXPECT_EQ(op->Input("Scale_weights").size(), 2u * layers)
<< "Scale_weights for node '" + op_name + "'."; << "Scale_weights for node '" + op_name + "'.";
EXPECT_EQ(BOOST_GET_CONST(bool, op->GetAttr("force_fp32_output")), true) EXPECT_EQ(PADDLE_GET_CONST(bool, op->GetAttr("force_fp32_output")),
true)
<< "force_fp32_output for node '" + op_name + "'."; << "force_fp32_output for node '" + op_name + "'.";
} else if (op->Type() == "quantize") { } else if (op->Type() == "quantize") {
quantize_nodes_count++; quantize_nodes_count++;
......
...@@ -179,9 +179,9 @@ void CPUQuantizeSquashPass::DequantQuantSquash( ...@@ -179,9 +179,9 @@ void CPUQuantizeSquashPass::DequantQuantSquash(
auto* next_op_desc = next_op->Op(); auto* next_op_desc = next_op->Op();
float dequant_scale = float dequant_scale =
BOOST_GET_CONST(float, dequant_op->Op()->GetAttr("Scale")); PADDLE_GET_CONST(float, dequant_op->Op()->GetAttr("Scale"));
float quant_scale = float quant_scale =
BOOST_GET_CONST(float, quant_op->Op()->GetAttr("Scale")); PADDLE_GET_CONST(float, quant_op->Op()->GetAttr("Scale"));
float dequant_shift = dequant_op->Op()->GetAttrIfExists<float>("Shift"); float dequant_shift = dequant_op->Op()->GetAttrIfExists<float>("Shift");
float quant_shift = quant_op->Op()->GetAttrIfExists<float>("Shift"); float quant_shift = quant_op->Op()->GetAttrIfExists<float>("Shift");
PADDLE_ENFORCE_NE( PADDLE_ENFORCE_NE(
...@@ -275,7 +275,7 @@ void CPUQuantizeSquashPass::OpRequantSquash(Graph* graph) const { ...@@ -275,7 +275,7 @@ void CPUQuantizeSquashPass::OpRequantSquash(Graph* graph) const {
requant_in->Name())); requant_in->Name()));
float requant_scale_out = float requant_scale_out =
BOOST_GET_CONST(float, requant_op->Op()->GetAttr("Scale_out")); PADDLE_GET_CONST(float, requant_op->Op()->GetAttr("Scale_out"));
any_op->Op()->SetAttr("Scale_out", requant_scale_out); any_op->Op()->SetAttr("Scale_out", requant_scale_out);
any_op->Op()->SetOutput(any_op_output_name, any_op->Op()->SetOutput(any_op_output_name,
std::vector<std::string>({requant_out->Name()})); std::vector<std::string>({requant_out->Name()}));
...@@ -488,10 +488,10 @@ void CPUQuantizeSquashPass::DequantScaleSquash(Graph* graph) const { ...@@ -488,10 +488,10 @@ void CPUQuantizeSquashPass::DequantScaleSquash(Graph* graph) const {
GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, dequant_scale_pattern); GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, dequant_scale_pattern);
if (dequant_out->outputs.size() == 1 && if (dequant_out->outputs.size() == 1 &&
BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) { PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) {
auto dequant_scale = dequant_op->Op()->GetAttrIfExists<float>("Scale"); auto dequant_scale = dequant_op->Op()->GetAttrIfExists<float>("Scale");
float scale_scale = float scale_scale =
BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")); PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale"));
PADDLE_ENFORCE_GT(dequant_scale, PADDLE_ENFORCE_GT(dequant_scale,
0.0f, 0.0f,
...@@ -540,10 +540,10 @@ void CPUQuantizeSquashPass::ScaleQuantSquash(Graph* graph) const { ...@@ -540,10 +540,10 @@ void CPUQuantizeSquashPass::ScaleQuantSquash(Graph* graph) const {
GET_IR_NODE_FROM_SUBGRAPH(quant_op, quant_op, scale_quant_pattern); GET_IR_NODE_FROM_SUBGRAPH(quant_op, quant_op, scale_quant_pattern);
if (quant_in->outputs.size() == 1 && if (quant_in->outputs.size() == 1 &&
BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) { PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) {
auto quant_scale = quant_op->Op()->GetAttrIfExists<float>("Scale"); auto quant_scale = quant_op->Op()->GetAttrIfExists<float>("Scale");
float scale_scale = float scale_scale =
BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")); PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale"));
PADDLE_ENFORCE_GT( PADDLE_ENFORCE_GT(
quant_scale, quant_scale,
......
...@@ -756,8 +756,8 @@ void EqualScaleTest(const ProgramDesc& prog, ...@@ -756,8 +756,8 @@ void EqualScaleTest(const ProgramDesc& prog,
for (auto* node : graph->Nodes()) { for (auto* node : graph->Nodes()) {
if (node->IsOp() && if (node->IsOp() &&
BOOST_GET_CONST(std::string, node->Op()->GetAttr("name")) == op_name) { PADDLE_GET_CONST(std::string, node->Op()->GetAttr("name")) == op_name) {
float op_scale = BOOST_GET_CONST(float, node->Op()->GetAttr(scale_name)); float op_scale = PADDLE_GET_CONST(float, node->Op()->GetAttr(scale_name));
EXPECT_EQ(op_scale, scale); EXPECT_EQ(op_scale, scale);
} }
} }
...@@ -775,10 +775,10 @@ void CheckRequantScalesTest(const ProgramDesc& prog, ...@@ -775,10 +775,10 @@ void CheckRequantScalesTest(const ProgramDesc& prog,
for (auto* node : graph->Nodes()) { for (auto* node : graph->Nodes()) {
if (node->IsOp() && node->Op()->Type() == "requantize") { if (node->IsOp() && node->Op()->Type() == "requantize") {
float op_scale_in = float op_scale_in =
BOOST_GET_CONST(float, node->Op()->GetAttr("Scale_in")); PADDLE_GET_CONST(float, node->Op()->GetAttr("Scale_in"));
EXPECT_EQ(op_scale_in, scale_in); EXPECT_EQ(op_scale_in, scale_in);
float op_scale_out = float op_scale_out =
BOOST_GET_CONST(float, node->Op()->GetAttr("Scale_out")); PADDLE_GET_CONST(float, node->Op()->GetAttr("Scale_out"));
EXPECT_EQ(op_scale_out, scale_out); EXPECT_EQ(op_scale_out, scale_out);
} }
} }
......
...@@ -133,12 +133,12 @@ TEST(DepthwiseConvMKLDNNPass, basic) { ...@@ -133,12 +133,12 @@ TEST(DepthwiseConvMKLDNNPass, basic) {
if (node->IsOp()) { if (node->IsOp()) {
auto* op = node->Op(); auto* op = node->Op();
if (op->Type() == "conv2d") { if (op->Type() == "conv2d") {
if (BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))) if (PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")))
after.mkldnn_conv_nodes++; after.mkldnn_conv_nodes++;
else else
after.other_conv_nodes++; after.other_conv_nodes++;
} else if (op->Type() == "depthwise_conv2d") { } else if (op->Type() == "depthwise_conv2d") {
if (BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))) if (PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")))
after.mkldnn_depthwise_conv_nodes++; after.mkldnn_depthwise_conv_nodes++;
else else
after.other_depthwise_conv_nodes++; after.other_depthwise_conv_nodes++;
......
...@@ -68,7 +68,7 @@ void ElementwiseActivationOneDNNPass::FuseElementwiseAct( ...@@ -68,7 +68,7 @@ void ElementwiseActivationOneDNNPass::FuseElementwiseAct(
const std::string wo_elt_type = const std::string wo_elt_type =
"The " + elt_type; // Workaround for PP error message checking. "The " + elt_type; // Workaround for PP error message checking.
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
BOOST_GET_CONST(bool, elementwise_op->GetAttr("use_mkldnn")), PADDLE_GET_CONST(bool, elementwise_op->GetAttr("use_mkldnn")),
true, true,
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
wo_elt_type + "+Act fusion may happen only when oneDNN library " wo_elt_type + "+Act fusion may happen only when oneDNN library "
...@@ -85,7 +85,7 @@ void ElementwiseActivationOneDNNPass::FuseElementwiseAct( ...@@ -85,7 +85,7 @@ void ElementwiseActivationOneDNNPass::FuseElementwiseAct(
} }
if (act_type == "gelu" && activation_op->HasAttr("approximate") && if (act_type == "gelu" && activation_op->HasAttr("approximate") &&
BOOST_GET_CONST(bool, activation_op->GetAttr("approximate"))) PADDLE_GET_CONST(bool, activation_op->GetAttr("approximate")))
elementwise_op->SetAttr("fuse_activation", std::string("gelu_tanh")); elementwise_op->SetAttr("fuse_activation", std::string("gelu_tanh"));
else else
elementwise_op->SetAttr("fuse_activation", act_type); elementwise_op->SetAttr("fuse_activation", act_type);
......
...@@ -56,14 +56,14 @@ void FuseFCActOneDNNPass::FuseFCAct(Graph *graph, ...@@ -56,14 +56,14 @@ void FuseFCActOneDNNPass::FuseFCAct(Graph *graph,
if (fc_op->HasAttr("use_mkldnn")) { if (fc_op->HasAttr("use_mkldnn")) {
PADDLE_ENFORCE( PADDLE_ENFORCE(
BOOST_GET_CONST(bool, fc_op->GetAttr("use_mkldnn")), PADDLE_GET_CONST(bool, fc_op->GetAttr("use_mkldnn")),
platform::errors::PreconditionNotMet( platform::errors::PreconditionNotMet(
"The FC+Act fusion may happen only when oneDNN library " "The FC+Act fusion may happen only when oneDNN library "
"is used.")); "is used."));
} }
if (act_type == "gelu" && act_op->HasAttr("approximate")) { if (act_type == "gelu" && act_op->HasAttr("approximate")) {
bool approximate = BOOST_GET_CONST(bool, act_op->GetAttr("approximate")); bool approximate = PADDLE_GET_CONST(bool, act_op->GetAttr("approximate"));
std::string type = approximate ? "_tanh" : "_erf"; std::string type = approximate ? "_tanh" : "_erf";
fc_op->SetAttr("activation_type", act_type + type); fc_op->SetAttr("activation_type", act_type + type);
} else { } else {
......
...@@ -77,10 +77,10 @@ TEST(FuseFCActOneDNNPass, FuseWithGeluTanh) { ...@@ -77,10 +77,10 @@ TEST(FuseFCActOneDNNPass, FuseWithGeluTanh) {
if (node->IsOp() && node->Op()->Type() == "fc") { if (node->IsOp() && node->Op()->Type() == "fc") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
ASSERT_TRUE(op->HasAttr("activation_type")); ASSERT_TRUE(op->HasAttr("activation_type"));
auto act_type = auto act_type =
BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); PADDLE_GET_CONST(std::string, op->GetAttr("activation_type"));
EXPECT_EQ(act_type.compare("gelu_tanh"), 0); EXPECT_EQ(act_type.compare("gelu_tanh"), 0);
} }
} }
...@@ -112,10 +112,10 @@ TEST(FuseFCActOneDNNPass, FuseWithGeluErf) { ...@@ -112,10 +112,10 @@ TEST(FuseFCActOneDNNPass, FuseWithGeluErf) {
if (node->IsOp() && node->Op()->Type() == "fc") { if (node->IsOp() && node->Op()->Type() == "fc") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
ASSERT_TRUE(op->HasAttr("activation_type")); ASSERT_TRUE(op->HasAttr("activation_type"));
auto act_type = auto act_type =
BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); PADDLE_GET_CONST(std::string, op->GetAttr("activation_type"));
EXPECT_EQ(act_type.compare("gelu_erf"), 0); EXPECT_EQ(act_type.compare("gelu_erf"), 0);
} }
} }
...@@ -145,10 +145,10 @@ TEST(FuseFCActOneDNNPass, FuseWithGeluAuto) { ...@@ -145,10 +145,10 @@ TEST(FuseFCActOneDNNPass, FuseWithGeluAuto) {
if (node->IsOp() && node->Op()->Type() == "fc") { if (node->IsOp() && node->Op()->Type() == "fc") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
ASSERT_TRUE(op->HasAttr("activation_type")); ASSERT_TRUE(op->HasAttr("activation_type"));
auto act_type = auto act_type =
BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); PADDLE_GET_CONST(std::string, op->GetAttr("activation_type"));
EXPECT_EQ(act_type.compare("gelu"), 0); EXPECT_EQ(act_type.compare("gelu"), 0);
} }
} }
...@@ -178,10 +178,10 @@ TEST(FuseFCActOneDNNPass, FuseWithTanh) { ...@@ -178,10 +178,10 @@ TEST(FuseFCActOneDNNPass, FuseWithTanh) {
if (node->IsOp() && node->Op()->Type() == "fc") { if (node->IsOp() && node->Op()->Type() == "fc") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
ASSERT_TRUE(op->HasAttr("activation_type")); ASSERT_TRUE(op->HasAttr("activation_type"));
auto act_type = auto act_type =
BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); PADDLE_GET_CONST(std::string, op->GetAttr("activation_type"));
EXPECT_EQ(act_type.compare("tanh"), 0); EXPECT_EQ(act_type.compare("tanh"), 0);
} }
} }
...@@ -212,10 +212,10 @@ TEST(FuseFCActOneDNNPass, FuseWithSigmoid) { ...@@ -212,10 +212,10 @@ TEST(FuseFCActOneDNNPass, FuseWithSigmoid) {
if (node->IsOp() && node->Op()->Type() == "fc") { if (node->IsOp() && node->Op()->Type() == "fc") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
ASSERT_TRUE(op->HasAttr("activation_type")); ASSERT_TRUE(op->HasAttr("activation_type"));
auto act_type = auto act_type =
BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); PADDLE_GET_CONST(std::string, op->GetAttr("activation_type"));
EXPECT_EQ(act_type.compare("sigmoid"), 0); EXPECT_EQ(act_type.compare("sigmoid"), 0);
} }
} }
...@@ -245,10 +245,10 @@ TEST(FuseFCActOneDNNPass, FuseWithMish) { ...@@ -245,10 +245,10 @@ TEST(FuseFCActOneDNNPass, FuseWithMish) {
if (node->IsOp() && node->Op()->Type() == "fc") { if (node->IsOp() && node->Op()->Type() == "fc") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
ASSERT_TRUE(op->HasAttr("activation_type")); ASSERT_TRUE(op->HasAttr("activation_type"));
auto act_type = auto act_type =
BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); PADDLE_GET_CONST(std::string, op->GetAttr("activation_type"));
EXPECT_EQ(act_type.compare("mish"), 0); EXPECT_EQ(act_type.compare("mish"), 0);
} }
} }
...@@ -279,10 +279,10 @@ TEST(FuseFCActOneDNNPass, FuseWithHardSwish) { ...@@ -279,10 +279,10 @@ TEST(FuseFCActOneDNNPass, FuseWithHardSwish) {
if (node->IsOp() && node->Op()->Type() == "fc") { if (node->IsOp() && node->Op()->Type() == "fc") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
ASSERT_TRUE(op->HasAttr("activation_type")); ASSERT_TRUE(op->HasAttr("activation_type"));
auto act_type = auto act_type =
BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); PADDLE_GET_CONST(std::string, op->GetAttr("activation_type"));
EXPECT_EQ(act_type.compare("hard_swish"), 0); EXPECT_EQ(act_type.compare("hard_swish"), 0);
} }
} }
......
...@@ -72,9 +72,10 @@ void MatmulActivationMkldnnFusePass::FuseMatmulAct( ...@@ -72,9 +72,10 @@ void MatmulActivationMkldnnFusePass::FuseMatmulAct(
} }
if (act_type == "gelu" && activation->Op()->HasAttr("approximate")) { if (act_type == "gelu" && activation->Op()->HasAttr("approximate")) {
act_type = BOOST_GET_CONST(bool, activation->Op()->GetAttr("approximate")) act_type =
? "gelu_tanh" PADDLE_GET_CONST(bool, activation->Op()->GetAttr("approximate"))
: "gelu_erf"; ? "gelu_tanh"
: "gelu_erf";
} }
matmul_op->SetAttr("fuse_activation", act_type); matmul_op->SetAttr("fuse_activation", act_type);
matmul_op->SetOutput("Out", {activation_out->Name()}); matmul_op->SetOutput("Out", {activation_out->Name()});
......
...@@ -113,9 +113,9 @@ void MatmulTransposeReshapeMKLDNNPass::ApplyImpl(ir::Graph *graph) const { ...@@ -113,9 +113,9 @@ void MatmulTransposeReshapeMKLDNNPass::ApplyImpl(ir::Graph *graph) const {
GET_IR_NODE_FROM_SUBGRAPH(reshape_out, reshape_out, mtrp); GET_IR_NODE_FROM_SUBGRAPH(reshape_out, reshape_out, mtrp);
GET_IR_NODE_FROM_SUBGRAPH(reshape_out_xshape, reshape_out_xshape, mtrp); GET_IR_NODE_FROM_SUBGRAPH(reshape_out_xshape, reshape_out_xshape, mtrp);
auto reshape_shape = auto reshape_shape =
BOOST_GET_CONST(std::vector<int>, reshape_op->Op()->GetAttr("shape")); PADDLE_GET_CONST(std::vector<int>, reshape_op->Op()->GetAttr("shape"));
auto transpose_axis = auto transpose_axis =
BOOST_GET_CONST(std::vector<int>, transpose_op->Op()->GetAttr("axis")); PADDLE_GET_CONST(std::vector<int>, transpose_op->Op()->GetAttr("axis"));
auto reshape_out_size = reshape_shape.size(); auto reshape_out_size = reshape_shape.size();
auto transpose_out_size = transpose_axis.size(); auto transpose_out_size = transpose_axis.size();
......
...@@ -56,7 +56,7 @@ void MKLDNNInPlacePass::ApplyImpl(ir::Graph* graph) const { ...@@ -56,7 +56,7 @@ void MKLDNNInPlacePass::ApplyImpl(ir::Graph* graph) const {
GET_IR_NODE_FROM_SUBGRAPH(next_op_out, next_op_out, mkldnn_inplace); GET_IR_NODE_FROM_SUBGRAPH(next_op_out, next_op_out, mkldnn_inplace);
if ((current_op->Op()->HasAttr("use_mkldnn") == false) || if ((current_op->Op()->HasAttr("use_mkldnn") == false) ||
(BOOST_GET_CONST(bool, current_op->Op()->GetAttr("use_mkldnn")) == (PADDLE_GET_CONST(bool, current_op->Op()->GetAttr("use_mkldnn")) ==
false)) { false)) {
VLOG(3) << "do not perform mkl-dnn inplace: use_mkldnn missing or set to " VLOG(3) << "do not perform mkl-dnn inplace: use_mkldnn missing or set to "
"false"; "false";
......
...@@ -67,7 +67,7 @@ static void GetInfoFromTheFirstOp( ...@@ -67,7 +67,7 @@ static void GetInfoFromTheFirstOp(
if (pos != std::string::npos) { if (pos != std::string::npos) {
std::string name = fake_name.substr(0, pos); std::string name = fake_name.substr(0, pos);
auto scales_vector = auto scales_vector =
BOOST_GET_CONST(std::vector<float>, op_desc->GetAttr(fake_name)); PADDLE_GET_CONST(std::vector<float>, op_desc->GetAttr(fake_name));
info_map->insert(std::make_pair(name, scales_vector)); info_map->insert(std::make_pair(name, scales_vector));
op_desc->RemoveAttr(fake_name); op_desc->RemoveAttr(fake_name);
} }
......
...@@ -145,7 +145,7 @@ class PlacementPassTest { ...@@ -145,7 +145,7 @@ class PlacementPassTest {
if (node->IsOp()) { if (node->IsOp()) {
auto* op = node->Op(); auto* op = node->Op();
if (op->HasAttr("use_mkldnn") && if (op->HasAttr("use_mkldnn") &&
BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))) { PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))) {
++use_mkldnn_true_count; ++use_mkldnn_true_count;
} }
} }
......
...@@ -103,8 +103,8 @@ void MultiGruSeqFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -103,8 +103,8 @@ void MultiGruSeqFusePass::ApplyImpl(ir::Graph* graph) const {
multi_gru_desc.SetAttr(attr.first, attr.second); multi_gru_desc.SetAttr(attr.first, attr.second);
} }
auto layers = BOOST_GET_CONST(int, gru1->Op()->GetAttr("layers")) + auto layers = PADDLE_GET_CONST(int, gru1->Op()->GetAttr("layers")) +
BOOST_GET_CONST(int, gru2->Op()->GetAttr("layers")); PADDLE_GET_CONST(int, gru2->Op()->GetAttr("layers"));
multi_gru_desc.SetAttr("layers", layers); multi_gru_desc.SetAttr("layers", layers);
auto multi_gru = auto multi_gru =
......
...@@ -72,7 +72,7 @@ void QuantDequantMkldnnPass::CollectInfoFromFake( ...@@ -72,7 +72,7 @@ void QuantDequantMkldnnPass::CollectInfoFromFake(
if (op_desc->HasAttr("max_range")) { if (op_desc->HasAttr("max_range")) {
const float max_range = const float max_range =
BOOST_GET_CONST(float, op_desc->GetAttr("max_range")); PADDLE_GET_CONST(float, op_desc->GetAttr("max_range"));
std::vector<float> thresholds = {127 * 127 / max_range}; std::vector<float> thresholds = {127 * 127 / max_range};
weight_thresholds->insert(std::make_pair(x_var_name, thresholds)); weight_thresholds->insert(std::make_pair(x_var_name, thresholds));
} else { } else {
...@@ -111,7 +111,7 @@ void QuantDequantMkldnnPass::CollectInputScalesFromFake( ...@@ -111,7 +111,7 @@ void QuantDequantMkldnnPass::CollectInputScalesFromFake(
fake_quantize_types.count(op_node->Name())) { fake_quantize_types.count(op_node->Name())) {
auto* op_desc = op_node->Op(); auto* op_desc = op_node->Op();
const int bit_length = const int bit_length =
BOOST_GET_CONST(int, op_desc->GetAttr("bit_length")); PADDLE_GET_CONST(int, op_desc->GetAttr("bit_length"));
PADDLE_ENFORCE_EQ(bit_length, PADDLE_ENFORCE_EQ(bit_length,
8, 8,
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
...@@ -160,7 +160,7 @@ void QuantDequantMkldnnPass::CollectOutputScalesFromAttr( ...@@ -160,7 +160,7 @@ void QuantDequantMkldnnPass::CollectOutputScalesFromAttr(
auto* op_desc = op_node->Op(); auto* op_desc = op_node->Op();
if (op_desc->HasAttr("out_threshold")) { if (op_desc->HasAttr("out_threshold")) {
const float attr_scale = const float attr_scale =
BOOST_GET_CONST(float, op_desc->GetAttr("out_threshold")); PADDLE_GET_CONST(float, op_desc->GetAttr("out_threshold"));
if (attr_scale == 0.0) continue; if (attr_scale == 0.0) continue;
float scale = 1.0 / attr_scale; float scale = 1.0 / attr_scale;
std::vector<float> scale_v = {scale}; std::vector<float> scale_v = {scale};
......
...@@ -101,11 +101,11 @@ void ShuffleChannelMKLDNNDetectPass::ApplyImpl(ir::Graph* graph) const { ...@@ -101,11 +101,11 @@ void ShuffleChannelMKLDNNDetectPass::ApplyImpl(ir::Graph* graph) const {
std::string output_name = reshape2_out->Name(); std::string output_name = reshape2_out->Name();
auto reshape1_shape = auto reshape1_shape =
BOOST_GET_CONST(std::vector<int>, reshape1_desc->GetAttr("shape")); PADDLE_GET_CONST(std::vector<int>, reshape1_desc->GetAttr("shape"));
auto reshape2_shape = auto reshape2_shape =
BOOST_GET_CONST(std::vector<int>, reshape2_desc->GetAttr("shape")); PADDLE_GET_CONST(std::vector<int>, reshape2_desc->GetAttr("shape"));
auto trans_axis = auto trans_axis =
BOOST_GET_CONST(std::vector<int>, trans_desc->GetAttr("axis")); PADDLE_GET_CONST(std::vector<int>, trans_desc->GetAttr("axis"));
auto* block1 = reshape1_desc->Block(); auto* block1 = reshape1_desc->Block();
auto* block2 = reshape2_desc->Block(); auto* block2 = reshape2_desc->Block();
if (block1 && block2) { if (block1 && block2) {
......
...@@ -69,7 +69,7 @@ void MainTest() { ...@@ -69,7 +69,7 @@ void MainTest() {
if (node->IsOp() && node->Op()->Type() == "shuffle_channel") { if (node->IsOp() && node->Op()->Type() == "shuffle_channel") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
} }
} }
} }
......
...@@ -62,7 +62,7 @@ void SoftplusActivationOneDNNPass::FuseSoftplusActivation( ...@@ -62,7 +62,7 @@ void SoftplusActivationOneDNNPass::FuseSoftplusActivation(
if (softplus_op->HasAttr("use_mkldnn")) { if (softplus_op->HasAttr("use_mkldnn")) {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
BOOST_GET_CONST(bool, softplus_op->GetAttr("use_mkldnn")), PADDLE_GET_CONST(bool, softplus_op->GetAttr("use_mkldnn")),
true, true,
platform::errors::PreconditionNotMet("The softplus + activation " platform::errors::PreconditionNotMet("The softplus + activation "
"fusion may happen only when " "fusion may happen only when "
...@@ -78,7 +78,7 @@ void SoftplusActivationOneDNNPass::FuseSoftplusActivation( ...@@ -78,7 +78,7 @@ void SoftplusActivationOneDNNPass::FuseSoftplusActivation(
} }
if (act_type == "gelu" && activation_op->HasAttr("approximate") && if (act_type == "gelu" && activation_op->HasAttr("approximate") &&
BOOST_GET_CONST(bool, activation_op->GetAttr("approximate"))) PADDLE_GET_CONST(bool, activation_op->GetAttr("approximate")))
softplus_op->SetAttr("fuse_activation", std::string("gelu_tanh")); softplus_op->SetAttr("fuse_activation", std::string("gelu_tanh"));
else else
softplus_op->SetAttr("fuse_activation", act_type); softplus_op->SetAttr("fuse_activation", act_type);
......
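Several of the oneDNN fuse passes above repeat the same gelu branch: unpack the boolean "approximate" attribute with the renamed macro and map it to either "gelu_tanh" or "gelu_erf". A standalone sketch of that branch follows; the Attribute alias and ResolveGeluKind helper are illustrative only and not part of the patch:

// Standalone sketch, illustrative names only -- not part of the patch.
#include <iostream>
#include <string>
#include <variant>

using Attribute = std::variant<bool, int, float, std::string>;

std::string ResolveGeluKind(const Attribute& approximate) {
  // Stands in for PADDLE_GET_CONST(bool, activation->Op()->GetAttr("approximate")).
  return std::get<bool>(approximate) ? "gelu_tanh" : "gelu_erf";
}

int main() {
  std::cout << ResolveGeluKind(Attribute{true}) << "\n";   // prints gelu_tanh
  std::cout << ResolveGeluKind(Attribute{false}) << "\n";  // prints gelu_erf
  return 0;
}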
...@@ -49,40 +49,44 @@ void MainTest(const std::string& activation_type) { ...@@ -49,40 +49,44 @@ void MainTest(const std::string& activation_type) {
if (node->IsOp() && node->Op()->Type() == "softplus") { if (node->IsOp() && node->Op()->Type() == "softplus") {
const auto* op = node->Op(); const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn")); ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
ASSERT_TRUE(op->HasAttr("fuse_activation")); ASSERT_TRUE(op->HasAttr("fuse_activation"));
auto activation_type = auto activation_type =
BOOST_GET_CONST(std::string, op->GetAttr("fuse_activation")); PADDLE_GET_CONST(std::string, op->GetAttr("fuse_activation"));
EXPECT_EQ(activation_type.compare(activation_type), 0); EXPECT_EQ(activation_type.compare(activation_type), 0);
} }
} }
} }
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithTanh){MainTest("tanh")} // clang-format off
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithTanh) {MainTest("tanh")}
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithRelu){MainTest("relu")} TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithRelu) {MainTest("relu")}
TEST(FuseSoftplusActivationOneDNNPass, TEST(FuseSoftplusActivationOneDNNPass,
FuseSoftplusWithLeakyRelu){MainTest("leaky_relu")} FuseSoftplusWithLeakyRelu) {MainTest("leaky_relu")}
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithSwish){MainTest("swish")} TEST(FuseSoftplusActivationOneDNNPass,
FuseSoftplusWithSwish) {MainTest("swish")}
TEST(FuseSoftplusActivationOneDNNPass, TEST(FuseSoftplusActivationOneDNNPass,
FuseSoftplusWithHardswish){MainTest("hardswish")} FuseSoftplusWithHardswish) {MainTest("hardswish")}
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithSqrt){MainTest("sqrt")} TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithSqrt) {MainTest("sqrt")}
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithAbs){MainTest("abs")} TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithAbs) {MainTest("abs")}
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithClip){MainTest("clip")} TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithClip) {MainTest("clip")}
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithGelu){MainTest("gelu")} TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithGelu) {MainTest("gelu")}
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithRelu6){MainTest("relu6")} TEST(FuseSoftplusActivationOneDNNPass,
FuseSoftplusWithRelu6) {MainTest("relu6")}
TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithSigmoid) { TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithSigmoid) {
MainTest("sigmoid") MainTest("sigmoid")
} }
// clang-format on
} // namespace ir } // namespace ir
} // namespace framework } // namespace framework
......
...@@ -87,7 +87,7 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const { ...@@ -87,7 +87,7 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const {
node->Op(), node->Op(),
platform::errors::InvalidArgument("Node(%s) must hold op description.", platform::errors::InvalidArgument("Node(%s) must hold op description.",
node->Name())); node->Name()));
int op_role = BOOST_GET_CONST( int op_role = PADDLE_GET_CONST(
int, int,
node->Op()->GetAttr( node->Op()->GetAttr(
framework::OpProtoAndCheckerMaker::OpRoleAttrName())); framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
...@@ -102,7 +102,7 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const { ...@@ -102,7 +102,7 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const {
auto op_role_var = node->Op()->GetNullableAttr( auto op_role_var = node->Op()->GetNullableAttr(
OpProtoAndCheckerMaker::OpRoleVarAttrName()); OpProtoAndCheckerMaker::OpRoleVarAttrName());
auto op_role_vars = auto op_role_vars =
BOOST_GET_CONST(std::vector<std::string>, op_role_var); PADDLE_GET_CONST(std::vector<std::string>, op_role_var);
for (size_t i = 0; i < op_role_vars.size(); i += 2) { for (size_t i = 0; i < op_role_vars.size(); i += 2) {
grad_names.insert(op_role_vars[i + 1]); grad_names.insert(op_role_vars[i + 1]);
gradname2paramname[op_role_vars[i + 1]] = op_role_vars[i]; gradname2paramname[op_role_vars[i + 1]] = op_role_vars[i];
......
...@@ -53,7 +53,7 @@ typedef std::vector<details::OpHandleBase *> GraphOps; ...@@ -53,7 +53,7 @@ typedef std::vector<details::OpHandleBase *> GraphOps;
const char kGraphOps[] = "ops"; const char kGraphOps[] = "ops";
bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) { bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) {
return BOOST_GET_CONST( return PADDLE_GET_CONST(
int, int,
node.Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) == node.Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
static_cast<int>(role); static_cast<int>(role);
...@@ -549,7 +549,7 @@ void MultiDevSSAGraphBuilderBase::CreateAllReduceOp(ir::Graph *result, ...@@ -549,7 +549,7 @@ void MultiDevSSAGraphBuilderBase::CreateAllReduceOp(ir::Graph *result,
"Please compile PaddlePaddle WITH_DGC first.")); "Please compile PaddlePaddle WITH_DGC first."));
#endif #endif
} else if (is_grad_merge) { } else if (is_grad_merge) {
grad_merge_cond_name = BOOST_GET_CONST( grad_merge_cond_name = PADDLE_GET_CONST(
std::string, node->Op()->GetAttr(GRAD_MERGE_COND_NAME)); std::string, node->Op()->GetAttr(GRAD_MERGE_COND_NAME));
VLOG(10) << "og=" << og << " use grad_merge_allreduce"; VLOG(10) << "og=" << og << " use grad_merge_allreduce";
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
...@@ -766,7 +766,7 @@ details::VarHandle *MultiDevSSAGraphBuilderBase::CreateReduceOp( ...@@ -766,7 +766,7 @@ details::VarHandle *MultiDevSSAGraphBuilderBase::CreateReduceOp(
bool MultiDevSSAGraphBuilderBase::IsScaleLossOp(ir::Node *node) const { bool MultiDevSSAGraphBuilderBase::IsScaleLossOp(ir::Node *node) const {
return !loss_var_name_.empty() && node->Op() && return !loss_var_name_.empty() && node->Op() &&
BOOST_GET_CONST( PADDLE_GET_CONST(
int, int,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) == node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
(static_cast<int>(OpRole::kBackward) | (static_cast<int>(OpRole::kBackward) |
...@@ -830,7 +830,7 @@ int BalanceVarSSAGraphBuilder::GetOpDeviceID(ir::Node *node) const { ...@@ -830,7 +830,7 @@ int BalanceVarSSAGraphBuilder::GetOpDeviceID(ir::Node *node) const {
if (!OpHaveRole(*node, framework::OpRole::kOptimize)) { if (!OpHaveRole(*node, framework::OpRole::kOptimize)) {
return -1; return -1;
} }
auto param_grad = BOOST_GET_CONST( auto param_grad = PADDLE_GET_CONST(
std::vector<std::string>, std::vector<std::string>,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
...@@ -951,7 +951,7 @@ int ReduceSSAGraphBuilder::GetOpDeviceID( ...@@ -951,7 +951,7 @@ int ReduceSSAGraphBuilder::GetOpDeviceID(
return -1; return -1;
} }
auto param_grad = BOOST_GET_CONST( auto param_grad = PADDLE_GET_CONST(
std::vector<std::string>, std::vector<std::string>,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
...@@ -1007,7 +1007,7 @@ std::vector<ir::Node *> ReduceSSAGraphBuilder::SortForReduceMode( ...@@ -1007,7 +1007,7 @@ std::vector<ir::Node *> ReduceSSAGraphBuilder::SortForReduceMode(
// gradients. // gradients.
sorted_ops.emplace_back(node); sorted_ops.emplace_back(node);
bool is_bk_op = static_cast<bool>( bool is_bk_op = static_cast<bool>(
BOOST_GET_CONST( PADDLE_GET_CONST(
int, int,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) & node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) &
static_cast<int>(OpRole::kBackward)); static_cast<int>(OpRole::kBackward));
...@@ -1062,9 +1062,9 @@ bool DistSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result, ...@@ -1062,9 +1062,9 @@ bool DistSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result,
node->Name())); node->Name()));
if (node->Op()->Type() == "recv") { if (node->Op()->Type() == "recv") {
auto recv_vars_attr = auto recv_vars_attr =
BOOST_GET_CONST(std::vector<std::string>, PADDLE_GET_CONST(std::vector<std::string>,
node->Op()->GetNullableAttr( node->Op()->GetNullableAttr(
OpProtoAndCheckerMaker::OpRoleVarAttrName())); OpProtoAndCheckerMaker::OpRoleVarAttrName()));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
recv_vars_attr.size(), recv_vars_attr.size(),
2UL, 2UL,
...@@ -1138,7 +1138,7 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const { ...@@ -1138,7 +1138,7 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
for (ir::Node *n : node->inputs) { for (ir::Node *n : node->inputs) {
input_var_names.push_back(n->Name()); input_var_names.push_back(n->Name());
} }
auto send_param_grad = BOOST_GET_CONST( auto send_param_grad = PADDLE_GET_CONST(
std::vector<std::string>, std::vector<std::string>,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
...@@ -1162,7 +1162,7 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const { ...@@ -1162,7 +1162,7 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
for (ir::Node *n : node->outputs) { for (ir::Node *n : node->outputs) {
output_var_names.push_back(n->Name()); output_var_names.push_back(n->Name());
} }
auto recv_param_grad = BOOST_GET_CONST( auto recv_param_grad = PADDLE_GET_CONST(
std::vector<std::string>, std::vector<std::string>,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
if (recv_param_grad.size() == 2U) { if (recv_param_grad.size() == 2U) {
......
...@@ -66,10 +66,10 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) { ...@@ -66,10 +66,10 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) {
Node* reshape2_qkv_out, Node* reshape2_qkv_out,
Node* scale, Node* scale,
Node* scale_out) { Node* scale_out) {
auto scale_attr = BOOST_GET_CONST(float, scale->Op()->GetAttr("scale")); auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale"));
// auto scale_bias = BOOST_GET_CONST(float, scale->Op()->GetAttr("bias")); // auto scale_bias = PADDLE_GET_CONST(float, scale->Op()->GetAttr("bias"));
// bool after_scale = // bool after_scale =
// BOOST_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale")); // PADDLE_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale"));
// create multihead // create multihead
OpDesc multihead_op_desc(mul0->Op()->Block()); OpDesc multihead_op_desc(mul0->Op()->Block());
...@@ -89,7 +89,8 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) { ...@@ -89,7 +89,8 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) {
auto reshape_desc = reshape2->Op(); auto reshape_desc = reshape2->Op();
int head_number = int head_number =
BOOST_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape")).at(2); PADDLE_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape"))
.at(2);
ReplaceOutputVar(mul0, mul0_out, q_var_node); ReplaceOutputVar(mul0, mul0_out, q_var_node);
ReplaceOutputVar(mul1, mul1_out, k_var_node); ReplaceOutputVar(mul1, mul1_out, k_var_node);
...@@ -803,7 +804,7 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, ...@@ -803,7 +804,7 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
Node* eltadd2, Node* eltadd2,
Node* matmul_qk, Node* matmul_qk,
Node* reshape2_qkv) { Node* reshape2_qkv) {
auto scale_attr = BOOST_GET_CONST(float, scale->Op()->GetAttr("scale")); auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale"));
// mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H) // mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H)
// bias (B * S * 3 * N * H) + bias (3 * N * H) // bias (B * S * 3 * N * H) + bias (3 * N * H)
...@@ -890,7 +891,8 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, ...@@ -890,7 +891,8 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
auto reshape_desc = reshape2->Op(); auto reshape_desc = reshape2->Op();
int head_number = int head_number =
BOOST_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape")).at(2); PADDLE_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape"))
.at(2);
OpDesc multihead_op_desc(mul0->Op()->Block()); OpDesc multihead_op_desc(mul0->Op()->Block());
multihead_op_desc.SetType("multihead_matmul"); multihead_op_desc.SetType("multihead_matmul");
...@@ -916,11 +918,11 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, ...@@ -916,11 +918,11 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
auto* add2_op_desc = eltadd2->Op(); auto* add2_op_desc = eltadd2->Op();
if (add0_op_desc->HasAttr("out_threshold")) { if (add0_op_desc->HasAttr("out_threshold")) {
auto out_scale0 = auto out_scale0 =
BOOST_GET_CONST(float, add0_op_desc->GetAttr("out_threshold")); PADDLE_GET_CONST(float, add0_op_desc->GetAttr("out_threshold"));
auto out_scale1 = auto out_scale1 =
BOOST_GET_CONST(float, add1_op_desc->GetAttr("out_threshold")); PADDLE_GET_CONST(float, add1_op_desc->GetAttr("out_threshold"));
auto out_scale2 = auto out_scale2 =
BOOST_GET_CONST(float, add2_op_desc->GetAttr("out_threshold")); PADDLE_GET_CONST(float, add2_op_desc->GetAttr("out_threshold"));
auto out_scale_max = std::max(out_scale0, out_scale1); auto out_scale_max = std::max(out_scale0, out_scale1);
out_scale_max = std::max(out_scale_max, out_scale2); out_scale_max = std::max(out_scale_max, out_scale2);
multihead_op_desc.SetAttr("fc_out_threshold", out_scale_max); multihead_op_desc.SetAttr("fc_out_threshold", out_scale_max);
...@@ -931,7 +933,7 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, ...@@ -931,7 +933,7 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
if (matmul_qk_op_desc->HasAttr("Input_scale")) { if (matmul_qk_op_desc->HasAttr("Input_scale")) {
multihead_op_desc.SetAttr("qkv2context_plugin_int8", true); multihead_op_desc.SetAttr("qkv2context_plugin_int8", true);
if (softmax_qk_op_desc->HasAttr("out_threshold")) { if (softmax_qk_op_desc->HasAttr("out_threshold")) {
auto qkv_plugin_scale = BOOST_GET_CONST( auto qkv_plugin_scale = PADDLE_GET_CONST(
float, softmax_qk_op_desc->GetAttr("out_threshold")); float, softmax_qk_op_desc->GetAttr("out_threshold"));
multihead_op_desc.SetAttr("dp_probs", qkv_plugin_scale); multihead_op_desc.SetAttr("dp_probs", qkv_plugin_scale);
} }
...@@ -1287,7 +1289,8 @@ int MultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph, ...@@ -1287,7 +1289,8 @@ int MultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph,
Node* reshape2, Node* reshape2,
Node* reshape2_qkv_out, Node* reshape2_qkv_out,
Node* matmul_qk) { Node* matmul_qk) {
auto scale_attr = BOOST_GET_CONST(float, matmul_qk->Op()->GetAttr("alpha")); auto scale_attr =
PADDLE_GET_CONST(float, matmul_qk->Op()->GetAttr("alpha"));
// mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H) // mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H)
// bias (B * S * 3 * N * H) + bias (3 * N * H) // bias (B * S * 3 * N * H) + bias (3 * N * H)
...@@ -1374,7 +1377,8 @@ int MultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph, ...@@ -1374,7 +1377,8 @@ int MultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph,
auto reshape_desc = reshape2->Op(); auto reshape_desc = reshape2->Op();
int head_number = int head_number =
BOOST_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape")).at(2); PADDLE_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape"))
.at(2);
OpDesc multihead_op_desc(mul0->Op()->Block()); OpDesc multihead_op_desc(mul0->Op()->Block());
multihead_op_desc.SetType("multihead_matmul"); multihead_op_desc.SetType("multihead_matmul");
......
...@@ -37,7 +37,7 @@ std::unordered_set<std::string> global_extra_attrs = { ...@@ -37,7 +37,7 @@ std::unordered_set<std::string> global_extra_attrs = {
"use_cudnn", "use_cudnn",
"name", "name",
"with_quant_attr"}; "with_quant_attr"};
} } // namespace
namespace paddle { namespace paddle {
namespace framework { namespace framework {
...@@ -45,14 +45,14 @@ namespace ir { ...@@ -45,14 +45,14 @@ namespace ir {
AttrCompat& AttrCompat::IsStringEQ(const std::string& value) { AttrCompat& AttrCompat::IsStringEQ(const std::string& value) {
conditions_.emplace_back([value](const Attribute& attr) -> bool { conditions_.emplace_back([value](const Attribute& attr) -> bool {
return value == BOOST_GET_CONST(std::string, attr); return value == PADDLE_GET_CONST(std::string, attr);
}); });
return *this; return *this;
} }
AttrCompat& AttrCompat::IsStringIn(const std::set<std::string>& candidates) { AttrCompat& AttrCompat::IsStringIn(const std::set<std::string>& candidates) {
conditions_.emplace_back([candidates](const Attribute& attr) -> bool { conditions_.emplace_back([candidates](const Attribute& attr) -> bool {
std::string value = BOOST_GET_CONST(std::string, attr); std::string value = PADDLE_GET_CONST(std::string, attr);
for (auto& str : candidates) { for (auto& str : candidates) {
if (str == value) { if (str == value) {
return true; return true;
...@@ -66,7 +66,7 @@ AttrCompat& AttrCompat::IsStringIn(const std::set<std::string>& candidates) { ...@@ -66,7 +66,7 @@ AttrCompat& AttrCompat::IsStringIn(const std::set<std::string>& candidates) {
AttrCompat& AttrCompat::IsStringMatch( AttrCompat& AttrCompat::IsStringMatch(
const std::function<bool(const std::string&)>& func) { const std::function<bool(const std::string&)>& func) {
conditions_.emplace_back([func](const Attribute& attr) -> bool { conditions_.emplace_back([func](const Attribute& attr) -> bool {
std::string value = BOOST_GET_CONST(std::string, attr); std::string value = PADDLE_GET_CONST(std::string, attr);
return func(value); return func(value);
}); });
return *this; return *this;
...@@ -74,7 +74,7 @@ AttrCompat& AttrCompat::IsStringMatch( ...@@ -74,7 +74,7 @@ AttrCompat& AttrCompat::IsStringMatch(
AttrCompat& AttrCompat::IsIntIn(const std::set<int>& candidates) { AttrCompat& AttrCompat::IsIntIn(const std::set<int>& candidates) {
conditions_.emplace_back([candidates](const Attribute& attr) -> bool { conditions_.emplace_back([candidates](const Attribute& attr) -> bool {
int value = BOOST_GET_CONST(int, attr); int value = PADDLE_GET_CONST(int, attr);
return candidates.find(value) != candidates.end(); return candidates.find(value) != candidates.end();
}); });
return *this; return *this;
...@@ -134,7 +134,7 @@ AttrCompat& AttrCompat::IsOptional() { ...@@ -134,7 +134,7 @@ AttrCompat& AttrCompat::IsOptional() {
AttrCompat& AttrCompat::IsBoolEQ(bool v) { AttrCompat& AttrCompat::IsBoolEQ(bool v) {
conditions_.emplace_back([v](const Attribute& attr) -> bool { conditions_.emplace_back([v](const Attribute& attr) -> bool {
bool value = BOOST_GET_CONST(bool, attr); bool value = PADDLE_GET_CONST(bool, attr);
return value == v; return value == v;
}); });
return *this; return *this;
......
...@@ -224,7 +224,7 @@ AttrCompat& AttrCompat::IsType() { ...@@ -224,7 +224,7 @@ AttrCompat& AttrCompat::IsType() {
template <typename T> template <typename T>
AttrCompat& AttrCompat::IsNumGT(T v) { AttrCompat& AttrCompat::IsNumGT(T v) {
conditions_.emplace_back([v](const Attribute& attr) -> bool { conditions_.emplace_back([v](const Attribute& attr) -> bool {
T value = BOOST_GET_CONST(T, attr); T value = PADDLE_GET_CONST(T, attr);
return value > v; return value > v;
}); });
return *this; return *this;
...@@ -233,7 +233,7 @@ AttrCompat& AttrCompat::IsNumGT(T v) { ...@@ -233,7 +233,7 @@ AttrCompat& AttrCompat::IsNumGT(T v) {
template <typename T> template <typename T>
AttrCompat& AttrCompat::IsNumGE(T v) { AttrCompat& AttrCompat::IsNumGE(T v) {
conditions_.emplace_back([v](const Attribute& attr) -> bool { conditions_.emplace_back([v](const Attribute& attr) -> bool {
T value = BOOST_GET_CONST(T, attr); T value = PADDLE_GET_CONST(T, attr);
return value >= v; return value >= v;
}); });
return *this; return *this;
...@@ -242,7 +242,7 @@ AttrCompat& AttrCompat::IsNumGE(T v) { ...@@ -242,7 +242,7 @@ AttrCompat& AttrCompat::IsNumGE(T v) {
template <typename T> template <typename T>
AttrCompat& AttrCompat::IsNumLT(T v) { AttrCompat& AttrCompat::IsNumLT(T v) {
conditions_.emplace_back([v](const Attribute& attr) -> bool { conditions_.emplace_back([v](const Attribute& attr) -> bool {
T value = BOOST_GET_CONST(T, attr); T value = PADDLE_GET_CONST(T, attr);
return value < v; return value < v;
}); });
return *this; return *this;
...@@ -251,7 +251,7 @@ AttrCompat& AttrCompat::IsNumLT(T v) { ...@@ -251,7 +251,7 @@ AttrCompat& AttrCompat::IsNumLT(T v) {
template <typename T> template <typename T>
AttrCompat& AttrCompat::IsNumLE(T v) { AttrCompat& AttrCompat::IsNumLE(T v) {
conditions_.emplace_back([v](const Attribute& attr) -> bool { conditions_.emplace_back([v](const Attribute& attr) -> bool {
T value = BOOST_GET_CONST(T, attr); T value = PADDLE_GET_CONST(T, attr);
return value <= v; return value <= v;
}); });
return *this; return *this;
...@@ -260,7 +260,7 @@ AttrCompat& AttrCompat::IsNumLE(T v) { ...@@ -260,7 +260,7 @@ AttrCompat& AttrCompat::IsNumLE(T v) {
template <typename T> template <typename T>
AttrCompat& AttrCompat::IsNumEQ(T v) { AttrCompat& AttrCompat::IsNumEQ(T v) {
conditions_.emplace_back([v](const Attribute& attr) -> bool { conditions_.emplace_back([v](const Attribute& attr) -> bool {
T value = BOOST_GET_CONST(T, attr); T value = PADDLE_GET_CONST(T, attr);
return value == v; return value == v;
}); });
return *this; return *this;
...@@ -269,7 +269,7 @@ AttrCompat& AttrCompat::IsNumEQ(T v) { ...@@ -269,7 +269,7 @@ AttrCompat& AttrCompat::IsNumEQ(T v) {
template <typename T> template <typename T>
AttrCompat& AttrCompat::IsNumMatch(bool (*func)(T)) { AttrCompat& AttrCompat::IsNumMatch(bool (*func)(T)) {
conditions_.emplace_back([func](const Attribute& attr) -> bool { conditions_.emplace_back([func](const Attribute& attr) -> bool {
T value = BOOST_GET_CONST(T, attr); T value = PADDLE_GET_CONST(T, attr);
return func(value); return func(value);
}); });
return *this; return *this;
......
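The AttrCompat hunks above all share one shape: push a predicate that unpacks the attribute with the renamed macro and compares it against a bound value. A minimal standalone sketch of that pattern follows, assuming a cut-down Attribute variant and a hypothetical AttrCheck class; neither is the real Paddle API:

// Standalone sketch, illustrative names only -- not part of the patch.
#include <functional>
#include <iostream>
#include <string>
#include <variant>
#include <vector>

using Attribute = std::variant<bool, int, float, std::string>;

class AttrCheck {  // hypothetical miniature of the AttrCompat pattern
 public:
  template <typename T>
  AttrCheck& IsNumGE(T v) {
    conditions_.emplace_back([v](const Attribute& attr) {
      return std::get<T>(attr) >= v;  // stands in for PADDLE_GET_CONST(T, attr)
    });
    return *this;
  }
  bool operator()(const Attribute& attr) const {
    for (const auto& cond : conditions_) {
      if (!cond(attr)) return false;
    }
    return true;
  }

 private:
  std::vector<std::function<bool(const Attribute&)>> conditions_;
};

int main() {
  AttrCheck check;
  check.IsNumGE(1);                          // chained conditions, as in the pass code
  std::cout << check(Attribute{8}) << "\n";  // prints 1
  std::cout << check(Attribute{0}) << "\n";  // prints 0
  return 0;
}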
...@@ -350,7 +350,7 @@ void QuantDequantFusePass::DeleteQuant(ir::Graph* graph, ...@@ -350,7 +350,7 @@ void QuantDequantFusePass::DeleteQuant(ir::Graph* graph,
Node* quant = subgraph.at(pattern.GetPDNode("quant_node")); Node* quant = subgraph.at(pattern.GetPDNode("quant_node"));
Node* output_scale = subgraph.at(pattern.GetPDNode("output_scale_node")); Node* output_scale = subgraph.at(pattern.GetPDNode("output_scale_node"));
Node* output_act = subgraph.at(pattern.GetPDNode("output_act_node")); Node* output_act = subgraph.at(pattern.GetPDNode("output_act_node"));
int bit_length = BOOST_GET_CONST(int, quant->Op()->GetAttr("bit_length")); int bit_length = PADDLE_GET_CONST(int, quant->Op()->GetAttr("bit_length"));
// Get input scale from tensor // Get input scale from tensor
std::string input_scale_var_name = quant->Op()->Input("InScale").front(); std::string input_scale_var_name = quant->Op()->Input("InScale").front();
...@@ -464,13 +464,13 @@ void QuantDequantFusePass::FuseDequant(ir::Graph* graph, ...@@ -464,13 +464,13 @@ void QuantDequantFusePass::FuseDequant(ir::Graph* graph,
std::unordered_set<const Node*> nodes2rm = {}; std::unordered_set<const Node*> nodes2rm = {};
int bit_length = int bit_length =
BOOST_GET_CONST(int, quantized_op_node->Op()->GetAttr("bit_length")); PADDLE_GET_CONST(int, quantized_op_node->Op()->GetAttr("bit_length"));
int range = ((1 << (bit_length - 1)) - 1); int range = ((1 << (bit_length - 1)) - 1);
std::vector<float> weight_scale; std::vector<float> weight_scale;
int quant_axis = 0; int quant_axis = 0;
if (dequant_op_node->Op()->HasAttr("quant_axis")) { if (dequant_op_node->Op()->HasAttr("quant_axis")) {
quant_axis = quant_axis =
BOOST_GET_CONST(int, dequant_op_node->Op()->GetAttr("quant_axis")); PADDLE_GET_CONST(int, dequant_op_node->Op()->GetAttr("quant_axis"));
} }
// Get weight scale // Get weight scale
if (dequant_type == "fake_channel_wise_dequantize_max_abs") { if (dequant_type == "fake_channel_wise_dequantize_max_abs") {
...@@ -497,7 +497,7 @@ void QuantDequantFusePass::FuseDequant(ir::Graph* graph, ...@@ -497,7 +497,7 @@ void QuantDequantFusePass::FuseDequant(ir::Graph* graph,
nodes2rm.insert(dequant_channel_scale_node); nodes2rm.insert(dequant_channel_scale_node);
} else { } else {
float max_range = float max_range =
BOOST_GET_CONST(float, dequant_op_node->Op()->GetAttr("max_range")); PADDLE_GET_CONST(float, dequant_op_node->Op()->GetAttr("max_range"));
weight_scale.push_back((range * range) / max_range / range); weight_scale.push_back((range * range) / max_range / range);
} }
......
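A minimal sketch of the dequantization arithmetic in the FuseDequant hunk above: range is the largest magnitude representable with the given bit length, and on the max_abs path the weight scale is recovered from the op's max_range attribute. The values below are made up for illustration; only the formulas follow the pass.

// Sketch only: standalone arithmetic, not Paddle API.
#include <iostream>

int main() {
  int bit_length = 8;
  int range = (1 << (bit_length - 1)) - 1;  // 127 for 8-bit quantization
  float max_range = 16129.0f;               // hypothetical dequant-op attribute
  // (range * range) / max_range / range is algebraically range / max_range.
  float weight_scale = static_cast<float>(range * range) / max_range / range;
  std::cout << range << " " << weight_scale << "\n";  // 127 0.00787...
  return 0;
}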
...@@ -413,7 +413,7 @@ void RemovePaddingRecoverPaddingPass::ApplyImpl(ir::Graph* graph) const { ...@@ -413,7 +413,7 @@ void RemovePaddingRecoverPaddingPass::ApplyImpl(ir::Graph* graph) const {
check_flag = false; check_flag = false;
} }
if (BOOST_GET_CONST(int, fc_op->Op()->GetAttr("in_num_col_dims")) != 2) { if (PADDLE_GET_CONST(int, fc_op->Op()->GetAttr("in_num_col_dims")) != 2) {
check_flag = false; check_flag = false;
} }
if (!check_flag) { if (!check_flag) {
......
...@@ -70,7 +70,7 @@ static bool IsOutputOfFC(Node* n) { ...@@ -70,7 +70,7 @@ static bool IsOutputOfFC(Node* n) {
static bool IsFCWithAct(Node* n, const std::string& act_type = "relu") { static bool IsFCWithAct(Node* n, const std::string& act_type = "relu") {
if (n && n->IsOp() && n->Op() && n->Op()->Type() == "fc" && if (n && n->IsOp() && n->Op() && n->Op()->Type() == "fc" &&
n->inputs.size() == 3U && n->outputs.size() == 1U) { n->inputs.size() == 3U && n->outputs.size() == 1U) {
return BOOST_GET_CONST(std::string, n->Op()->GetAttr("activation_type")) == return PADDLE_GET_CONST(std::string, n->Op()->GetAttr("activation_type")) ==
act_type; act_type;
} }
return false; return false;
...@@ -81,7 +81,7 @@ static bool IsFCWithPaddingWeights(Node* n) { ...@@ -81,7 +81,7 @@ static bool IsFCWithPaddingWeights(Node* n) {
if (n && n->IsOp() && n->Op() && n->Op()->Type() == "fc" && if (n && n->IsOp() && n->Op() && n->Op()->Type() == "fc" &&
n->inputs.size() == 3U && n->outputs.size() == 1U) { n->inputs.size() == 3U && n->outputs.size() == 1U) {
if (n->Op()->HasAttr("padding_weights")) { if (n->Op()->HasAttr("padding_weights")) {
res = BOOST_GET_CONST(bool, n->Op()->GetAttr("padding_weights")); res = PADDLE_GET_CONST(bool, n->Op()->GetAttr("padding_weights"));
} }
} }
return res; return res;
......
...@@ -49,7 +49,7 @@ PDNode* BuildSeqPoolConcatPattern(PDPattern* pattern, ...@@ -49,7 +49,7 @@ PDNode* BuildSeqPoolConcatPattern(PDPattern* pattern,
bool this_is_seqpool_op = bool this_is_seqpool_op =
x && x->IsOp() && x->Op()->Type() == "sequence_pool" && x && x->IsOp() && x->Op()->Type() == "sequence_pool" &&
x->Op()->HasAttr("pooltype") && x->Op()->HasAttr("pooltype") &&
BOOST_GET_CONST(std::string, x->Op()->GetAttr("pooltype")) == type && PADDLE_GET_CONST(std::string, x->Op()->GetAttr("pooltype")) == type &&
x->outputs.size() == 2; // seqpool should only have 2 outputs x->outputs.size() == 2; // seqpool should only have 2 outputs
bool satisfied_all = this_is_seqpool_op; bool satisfied_all = this_is_seqpool_op;
if (this_is_seqpool_op) { if (this_is_seqpool_op) {
......
...@@ -101,11 +101,11 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const { ...@@ -101,11 +101,11 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
std::string output_name = reshape2_out->Name(); std::string output_name = reshape2_out->Name();
auto reshape1_shape = auto reshape1_shape =
BOOST_GET_CONST(std::vector<int>, reshape1_desc->GetAttr("shape")); PADDLE_GET_CONST(std::vector<int>, reshape1_desc->GetAttr("shape"));
auto reshape2_shape = auto reshape2_shape =
BOOST_GET_CONST(std::vector<int>, reshape2_desc->GetAttr("shape")); PADDLE_GET_CONST(std::vector<int>, reshape2_desc->GetAttr("shape"));
auto trans_axis = auto trans_axis =
BOOST_GET_CONST(std::vector<int>, trans_desc->GetAttr("axis")); PADDLE_GET_CONST(std::vector<int>, trans_desc->GetAttr("axis"));
auto* block1 = reshape1_desc->Block(); auto* block1 = reshape1_desc->Block();
auto* block2 = reshape2_desc->Block(); auto* block2 = reshape2_desc->Block();
if (block1 && block2) { if (block1 && block2) {
......
...@@ -79,10 +79,10 @@ bool SimplifyWithBasicOpsPass::SimplifyDropout( ...@@ -79,10 +79,10 @@ bool SimplifyWithBasicOpsPass::SimplifyDropout(
// dropout_op is INT. // dropout_op is INT.
if (dropout_op_desc->HasAttr("is_test")) { if (dropout_op_desc->HasAttr("is_test")) {
if (dropout_op_desc->GetAttrType("is_test") == proto::AttrType::BOOLEAN) { if (dropout_op_desc->GetAttrType("is_test") == proto::AttrType::BOOLEAN) {
is_test = BOOST_GET_CONST(bool, dropout_op_desc->GetAttr("is_test")); is_test = PADDLE_GET_CONST(bool, dropout_op_desc->GetAttr("is_test"));
} else if (dropout_op_desc->GetAttrType("is_test") == } else if (dropout_op_desc->GetAttrType("is_test") ==
proto::AttrType::INT) { proto::AttrType::INT) {
is_test = BOOST_GET_CONST(int, dropout_op_desc->GetAttr("is_test")) == 0 is_test = PADDLE_GET_CONST(int, dropout_op_desc->GetAttr("is_test")) == 0
? false ? false
: true; : true;
} }
...@@ -100,14 +100,14 @@ bool SimplifyWithBasicOpsPass::SimplifyDropout( ...@@ -100,14 +100,14 @@ bool SimplifyWithBasicOpsPass::SimplifyDropout(
if (dropout_op_desc->HasAttr("dropout_implementation")) { if (dropout_op_desc->HasAttr("dropout_implementation")) {
if (dropout_op_desc->GetAttrType("dropout_implementation") == if (dropout_op_desc->GetAttrType("dropout_implementation") ==
proto::AttrType::BOOLEAN) { proto::AttrType::BOOLEAN) {
upscale_in_train = BOOST_GET_CONST( upscale_in_train = PADDLE_GET_CONST(
bool, dropout_op_desc->GetAttr("dropout_implementation")); bool, dropout_op_desc->GetAttr("dropout_implementation"));
} else if (dropout_op_desc->GetAttrType("dropout_implementation") == } else if (dropout_op_desc->GetAttrType("dropout_implementation") ==
proto::AttrType::STRING) { proto::AttrType::STRING) {
upscale_in_train = upscale_in_train =
BOOST_GET_CONST(std::string, PADDLE_GET_CONST(std::string,
dropout_op_desc->GetAttr("dropout_implementation")) == dropout_op_desc->GetAttr(
"upscale_in_train"; "dropout_implementation")) == "upscale_in_train";
} }
} }
...@@ -156,8 +156,8 @@ bool SimplifyWithBasicOpsPass::SimplifyDropout( ...@@ -156,8 +156,8 @@ bool SimplifyWithBasicOpsPass::SimplifyDropout(
// | // |
// \|/ // \|/
// dropout_x -> scale_op -> dropout_out -> next_op -> next_out // dropout_x -> scale_op -> dropout_out -> next_op -> next_out
float scale = float scale = 1.0f - PADDLE_GET_CONST(
1.0f - BOOST_GET_CONST(float, dropout_op_desc->GetAttr("dropout_prob")); float, dropout_op_desc->GetAttr("dropout_prob"));
framework::OpDesc new_op_desc(dropout_op_desc->Block()); framework::OpDesc new_op_desc(dropout_op_desc->Block());
new_op_desc.SetType("scale"); new_op_desc.SetType("scale");
......
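A minimal sketch of the arithmetic behind the dropout simplification above: at inference time, dropout with downgrade_in_infer reduces to a scale by (1 - dropout_prob), while upscale_in_train dropout is the identity, which is why the pass can replace the op with a scale op or remove it. The helper below is illustrative only, not a Paddle API.

// Sketch only: the inference-time scale factor the pass computes.
#include <iostream>
#include <string>

float InferenceScale(const std::string& dropout_implementation,
                     float dropout_prob) {
  if (dropout_implementation == "upscale_in_train") {
    return 1.0f;                // dropout becomes a no-op at inference
  }
  return 1.0f - dropout_prob;   // "downgrade_in_infer": scale the activations
}

int main() {
  std::cout << InferenceScale("downgrade_in_infer", 0.1f) << "\n";  // 0.9
  std::cout << InferenceScale("upscale_in_train", 0.1f) << "\n";    // 1
  return 0;
}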
...@@ -84,7 +84,7 @@ TEST(IsTestPass, basic) { ...@@ -84,7 +84,7 @@ TEST(IsTestPass, basic) {
for (auto* node : graph->Nodes()) { for (auto* node : graph->Nodes()) {
if (node->IsOp()) { if (node->IsOp()) {
auto* op = node->Op(); auto* op = node->Op();
auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name"));
if (op_name == "bn") { if (op_name == "bn") {
ASSERT_EQ(op->Type(), "sync_batch_norm"); ASSERT_EQ(op->Type(), "sync_batch_norm");
} }
......
...@@ -117,18 +117,18 @@ void TransposeFlattenConcatFusePass::RunTransposeFlattenConcatFuse( ...@@ -117,18 +117,18 @@ void TransposeFlattenConcatFusePass::RunTransposeFlattenConcatFuse(
input_nodes[i]->name())); input_nodes[i]->name()));
if (i == 0) { if (i == 0) {
trans_axis0 = BOOST_GET_CONST( trans_axis0 = PADDLE_GET_CONST(
std::vector<int>, std::vector<int>,
subgraph.at(pattern.GetPDNode("transpose" + std::to_string(0))) subgraph.at(pattern.GetPDNode("transpose" + std::to_string(0)))
->Op() ->Op()
->GetAttr("axis")); ->GetAttr("axis"));
flatten_axis0 = BOOST_GET_CONST( flatten_axis0 = PADDLE_GET_CONST(
int, int,
subgraph.at(pattern.GetPDNode("flatten" + std::to_string(0))) subgraph.at(pattern.GetPDNode("flatten" + std::to_string(0)))
->Op() ->Op()
->GetAttr("axis")); ->GetAttr("axis"));
} else { } else {
std::vector<int> trans_axis = BOOST_GET_CONST( std::vector<int> trans_axis = PADDLE_GET_CONST(
std::vector<int>, std::vector<int>,
subgraph.at(pattern.GetPDNode("transpose" + std::to_string(i))) subgraph.at(pattern.GetPDNode("transpose" + std::to_string(i)))
->Op() ->Op()
...@@ -136,7 +136,7 @@ void TransposeFlattenConcatFusePass::RunTransposeFlattenConcatFuse( ...@@ -136,7 +136,7 @@ void TransposeFlattenConcatFusePass::RunTransposeFlattenConcatFuse(
// All axis of transpose should be the same // All axis of transpose should be the same
if (trans_axis0 != trans_axis) return; if (trans_axis0 != trans_axis) return;
int flatten_axis = BOOST_GET_CONST( int flatten_axis = PADDLE_GET_CONST(
int, int,
subgraph.at(pattern.GetPDNode("flatten" + std::to_string(0))) subgraph.at(pattern.GetPDNode("flatten" + std::to_string(0)))
->Op() ->Op()
...@@ -159,11 +159,11 @@ void TransposeFlattenConcatFusePass::RunTransposeFlattenConcatFuse( ...@@ -159,11 +159,11 @@ void TransposeFlattenConcatFusePass::RunTransposeFlattenConcatFuse(
Node *concat_op = subgraph.at(pattern.GetPDNode("concat")); Node *concat_op = subgraph.at(pattern.GetPDNode("concat"));
Node *concat_out = subgraph.at(pattern.GetPDNode("concat_out")); Node *concat_out = subgraph.at(pattern.GetPDNode("concat_out"));
std::vector<std::string> input_names; std::vector<std::string> input_names;
std::vector<int> trans_axis = BOOST_GET_CONST( std::vector<int> trans_axis = PADDLE_GET_CONST(
std::vector<int>, nodes[kTransOffset]->Op()->GetAttr("axis")); std::vector<int>, nodes[kTransOffset]->Op()->GetAttr("axis"));
int flatten_axis = int flatten_axis =
BOOST_GET_CONST(int, nodes[kFlattenOffset]->Op()->GetAttr("axis")); PADDLE_GET_CONST(int, nodes[kFlattenOffset]->Op()->GetAttr("axis"));
int concat_axis = BOOST_GET_CONST(int, concat_op->Op()->GetAttr("axis")); int concat_axis = PADDLE_GET_CONST(int, concat_op->Op()->GetAttr("axis"));
std::string output_name = concat_out->Name(); std::string output_name = concat_out->Name();
for (int i = 0; i < times; i++) { for (int i = 0; i < times; i++) {
......
...@@ -272,8 +272,8 @@ void TrtMapMatmul2MulPass::ApplyImpl(ir::Graph* graph) const { ...@@ -272,8 +272,8 @@ void TrtMapMatmul2MulPass::ApplyImpl(ir::Graph* graph) const {
bool flag = true; bool flag = true;
bool transpose_X = bool transpose_X =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X"));
float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha"));
flag = flag && !transpose_X && std::abs(alpha - 1.0) < 1e-5; flag = flag && !transpose_X && std::abs(alpha - 1.0) < 1e-5;
std::vector<int64_t> x_shape = matmul_in_x->Var()->GetShape(); std::vector<int64_t> x_shape = matmul_in_x->Var()->GetShape();
...@@ -359,7 +359,7 @@ void TrtMapMatmulV2ToMulPass::ApplyImpl(ir::Graph* graph) const { ...@@ -359,7 +359,7 @@ void TrtMapMatmulV2ToMulPass::ApplyImpl(ir::Graph* graph) const {
bool flag = true; bool flag = true;
bool trans_x = bool trans_x =
BOOST_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_x")); PADDLE_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_x"));
flag = flag && !trans_x; flag = flag && !trans_x;
std::vector<int64_t> x_shape = matmul_v2_in_x->Var()->GetShape(); std::vector<int64_t> x_shape = matmul_v2_in_x->Var()->GetShape();
...@@ -531,17 +531,17 @@ void TrtSqueeze2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -531,17 +531,17 @@ void TrtSqueeze2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
size_t squeeze2_in_x_rank = (squeeze2_in_x->Var()->GetShape()).size(); size_t squeeze2_in_x_rank = (squeeze2_in_x->Var()->GetShape()).size();
std::vector<int> squeeze2_op_axes = std::vector<int> squeeze2_op_axes =
BOOST_GET_CONST(std::vector<int>, squeeze2_op->Op()->GetAttr("axes")); PADDLE_GET_CONST(std::vector<int>, squeeze2_op->Op()->GetAttr("axes"));
flag = flag && squeeze2_in_x_rank == 4 && flag = flag && squeeze2_in_x_rank == 4 &&
squeeze2_op_axes == std::vector<int>{2, 3} && squeeze2_op_axes == std::vector<int>{2, 3} &&
(matmul_in_x->outputs).size() == 1 && (matmul_in_x->outputs).size() == 1 &&
matmul_in_y->Var()->Persistable(); matmul_in_y->Var()->Persistable();
bool transpose_X = bool transpose_X =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X"));
bool transpose_Y = bool transpose_Y =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y"));
float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha"));
size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size();
size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size();
flag = flag && !transpose_X && !transpose_Y && flag = flag && !transpose_X && !transpose_Y &&
...@@ -690,16 +690,16 @@ void TrtReshape2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -690,16 +690,16 @@ void TrtReshape2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
auto reshape2_in_x_shape = reshape2_in_x->Var()->GetShape(); auto reshape2_in_x_shape = reshape2_in_x->Var()->GetShape();
size_t reshape2_in_x_rank = reshape2_in_x_shape.size(); size_t reshape2_in_x_rank = reshape2_in_x_shape.size();
std::vector<int> reshape2_op_shape = std::vector<int> reshape2_op_shape =
BOOST_GET_CONST(std::vector<int>, reshape2_op->Op()->GetAttr("shape")); PADDLE_GET_CONST(std::vector<int>, reshape2_op->Op()->GetAttr("shape"));
flag = flag && reshape2_in_nums == 1 && reshape2_in_x_rank == 4 && flag = flag && reshape2_in_nums == 1 && reshape2_in_x_rank == 4 &&
reshape2_in_x_shape[2] == 1 && reshape2_in_x_shape[3] == 1 && reshape2_in_x_shape[2] == 1 && reshape2_in_x_shape[3] == 1 &&
reshape2_op_shape.size() == 2 && (matmul_in_x->outputs).size() == 1; reshape2_op_shape.size() == 2 && (matmul_in_x->outputs).size() == 1;
bool transpose_X = bool transpose_X =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X"));
bool transpose_Y = bool transpose_Y =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y"));
float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha"));
size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size();
size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size();
flag = flag && !transpose_X && !transpose_Y && flag = flag && !transpose_X && !transpose_Y &&
...@@ -786,7 +786,7 @@ void TrtFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -786,7 +786,7 @@ void TrtFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
auto flatten2_in_x_shape = flatten2_in_x->Var()->GetShape(); auto flatten2_in_x_shape = flatten2_in_x->Var()->GetShape();
size_t flatten2_in_x_rank = flatten2_in_x_shape.size(); size_t flatten2_in_x_rank = flatten2_in_x_shape.size();
int flatten2_axis = int flatten2_axis =
BOOST_GET_CONST(int, flatten2_op->Op()->GetAttr("axis")); PADDLE_GET_CONST(int, flatten2_op->Op()->GetAttr("axis"));
// only convert matmul to mul when the flatten2 has a single input // only convert matmul to mul when the flatten2 has a single input
// and the rank of input is 4 and the size of the output of matmul // and the rank of input is 4 and the size of the output of matmul
// is 1. // is 1.
...@@ -795,10 +795,10 @@ void TrtFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { ...@@ -795,10 +795,10 @@ void TrtFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const {
(matmul_in_x->outputs).size() == 1; (matmul_in_x->outputs).size() == 1;
bool transpose_X = bool transpose_X =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X"));
bool transpose_Y = bool transpose_Y =
BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y"));
float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha"));
size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size();
size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size();
pattern_found = pattern_found && !transpose_X && !transpose_Y && pattern_found = pattern_found && !transpose_X && !transpose_Y &&
......
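A minimal sketch of the eligibility test repeated across the Trt*Matmul*FusePass hunks above: a matmul can only be rewritten as mul when neither operand is transposed and alpha is numerically 1. The free function below is a stand-in for illustration, not part of the pass.

// Sketch only: the shared flag check, extracted for clarity.
#include <cmath>
#include <iostream>

bool CanMapMatmulToMul(bool transpose_X, bool transpose_Y, float alpha) {
  return !transpose_X && !transpose_Y && std::abs(alpha - 1.0f) < 1e-5f;
}

int main() {
  std::cout << std::boolalpha << CanMapMatmulToMul(false, false, 1.0f) << "\n";  // true
  std::cout << CanMapMatmulToMul(false, true, 1.0f) << "\n";                     // false
  return 0;
}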
...@@ -66,10 +66,10 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) { ...@@ -66,10 +66,10 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) {
Node* reshape2_qkv_out, Node* reshape2_qkv_out,
Node* scale, Node* scale,
Node* scale_out) { Node* scale_out) {
auto scale_attr = BOOST_GET_CONST(float, scale->Op()->GetAttr("scale")); auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale"));
// auto scale_bias = BOOST_GET_CONST(float, scale->Op()->GetAttr("bias")); // auto scale_bias = PADDLE_GET_CONST(float, scale->Op()->GetAttr("bias"));
// bool after_scale = // bool after_scale =
// BOOST_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale")); // PADDLE_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale"));
// create multihead // create multihead
OpDesc multihead_op_desc(mul0->Op()->Block()); OpDesc multihead_op_desc(mul0->Op()->Block());
...@@ -89,7 +89,8 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) { ...@@ -89,7 +89,8 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) {
auto reshape_desc = reshape2->Op(); auto reshape_desc = reshape2->Op();
int head_number = int head_number =
BOOST_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape")).at(2); PADDLE_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape"))
.at(2);
ReplaceOutputVar(mul0, mul0_out, q_var_node); ReplaceOutputVar(mul0, mul0_out, q_var_node);
ReplaceOutputVar(mul1, mul1_out, k_var_node); ReplaceOutputVar(mul1, mul1_out, k_var_node);
...@@ -822,7 +823,7 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, ...@@ -822,7 +823,7 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
Node* eltadd2, Node* eltadd2,
Node* matmul_qk, Node* matmul_qk,
Node* reshape2_qkv) { Node* reshape2_qkv) {
auto scale_attr = BOOST_GET_CONST(float, scale->Op()->GetAttr("scale")); auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale"));
// mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H) // mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H)
// bias (B * S * 3 * N * H) + bias (3 * N * H) // bias (B * S * 3 * N * H) + bias (3 * N * H)
...@@ -909,7 +910,8 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, ...@@ -909,7 +910,8 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
auto reshape_desc = reshape2->Op(); auto reshape_desc = reshape2->Op();
int head_number = int head_number =
BOOST_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape")).at(2); PADDLE_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape"))
.at(2);
OpDesc multihead_op_desc(mul0->Op()->Block()); OpDesc multihead_op_desc(mul0->Op()->Block());
multihead_op_desc.SetType("multihead_matmul"); multihead_op_desc.SetType("multihead_matmul");
...@@ -935,11 +937,11 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, ...@@ -935,11 +937,11 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
auto* add2_op_desc = eltadd2->Op(); auto* add2_op_desc = eltadd2->Op();
if (add0_op_desc->HasAttr("out_threshold")) { if (add0_op_desc->HasAttr("out_threshold")) {
auto out_scale0 = auto out_scale0 =
BOOST_GET_CONST(float, add0_op_desc->GetAttr("out_threshold")); PADDLE_GET_CONST(float, add0_op_desc->GetAttr("out_threshold"));
auto out_scale1 = auto out_scale1 =
BOOST_GET_CONST(float, add1_op_desc->GetAttr("out_threshold")); PADDLE_GET_CONST(float, add1_op_desc->GetAttr("out_threshold"));
auto out_scale2 = auto out_scale2 =
BOOST_GET_CONST(float, add2_op_desc->GetAttr("out_threshold")); PADDLE_GET_CONST(float, add2_op_desc->GetAttr("out_threshold"));
auto out_scale_max = std::max(out_scale0, out_scale1); auto out_scale_max = std::max(out_scale0, out_scale1);
out_scale_max = std::max(out_scale_max, out_scale2); out_scale_max = std::max(out_scale_max, out_scale2);
multihead_op_desc.SetAttr("fc_out_threshold", out_scale_max); multihead_op_desc.SetAttr("fc_out_threshold", out_scale_max);
...@@ -950,7 +952,7 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, ...@@ -950,7 +952,7 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph,
if (matmul_qk_op_desc->HasAttr("Input_scale")) { if (matmul_qk_op_desc->HasAttr("Input_scale")) {
multihead_op_desc.SetAttr("qkv2context_plugin_int8", true); multihead_op_desc.SetAttr("qkv2context_plugin_int8", true);
if (softmax_qk_op_desc->HasAttr("out_threshold")) { if (softmax_qk_op_desc->HasAttr("out_threshold")) {
auto qkv_plugin_scale = BOOST_GET_CONST( auto qkv_plugin_scale = PADDLE_GET_CONST(
float, softmax_qk_op_desc->GetAttr("out_threshold")); float, softmax_qk_op_desc->GetAttr("out_threshold"));
multihead_op_desc.SetAttr("dp_probs", qkv_plugin_scale); multihead_op_desc.SetAttr("dp_probs", qkv_plugin_scale);
} }
...@@ -1337,7 +1339,8 @@ int TrtMultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph, ...@@ -1337,7 +1339,8 @@ int TrtMultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph,
Node* reshape2, Node* reshape2,
Node* reshape2_qkv_out, Node* reshape2_qkv_out,
Node* matmul_qk) { Node* matmul_qk) {
auto scale_attr = BOOST_GET_CONST(float, matmul_qk->Op()->GetAttr("alpha")); auto scale_attr =
PADDLE_GET_CONST(float, matmul_qk->Op()->GetAttr("alpha"));
// mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H) // mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H)
// bias (B * S * 3 * N * H) + bias (3 * N * H) // bias (B * S * 3 * N * H) + bias (3 * N * H)
...@@ -1424,7 +1427,8 @@ int TrtMultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph, ...@@ -1424,7 +1427,8 @@ int TrtMultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph,
auto reshape_desc = reshape2->Op(); auto reshape_desc = reshape2->Op();
int head_number = int head_number =
BOOST_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape")).at(2); PADDLE_GET_CONST(std::vector<int>, reshape_desc->GetAttr("shape"))
.at(2);
OpDesc multihead_op_desc(mul0->Op()->Block()); OpDesc multihead_op_desc(mul0->Op()->Block());
multihead_op_desc.SetType("multihead_matmul"); multihead_op_desc.SetType("multihead_matmul");
......
...@@ -153,9 +153,9 @@ void UnsqueezeEltwiseFusePass::ApplyImpl(ir::Graph *graph) const { ...@@ -153,9 +153,9 @@ void UnsqueezeEltwiseFusePass::ApplyImpl(ir::Graph *graph) const {
size_t eltwise_in_x_rank = (subgraph.at(x)->Var()->GetShape()).size(); size_t eltwise_in_x_rank = (subgraph.at(x)->Var()->GetShape()).size();
size_t unsqz_in_rank = (subgraph.at(y)->Var()->GetShape()).size(); size_t unsqz_in_rank = (subgraph.at(y)->Var()->GetShape()).size();
std::vector<int> unsqz_op_axes = std::vector<int> unsqz_op_axes =
BOOST_GET_CONST(std::vector<int>, unsqz_op->Op()->GetAttr("axes")); PADDLE_GET_CONST(std::vector<int>, unsqz_op->Op()->GetAttr("axes"));
int eltwise_op_axis = int eltwise_op_axis =
BOOST_GET_CONST(int, eltwise_op->Op()->GetAttr("axis")); PADDLE_GET_CONST(int, eltwise_op->Op()->GetAttr("axis"));
if (eltwise_in_x_rank == 4 && unsqz_in_rank == 2 && if (eltwise_in_x_rank == 4 && unsqz_in_rank == 2 &&
unsqz_op_axes == std::vector<int>{2, 3} && eltwise_op_axis == -1) { unsqz_op_axes == std::vector<int>{2, 3} && eltwise_op_axis == -1) {
......
...@@ -121,17 +121,17 @@ ProgramDesc GetLmMainProgram() { ...@@ -121,17 +121,17 @@ ProgramDesc GetLmMainProgram() {
int64_t batch_size = 20; int64_t batch_size = 20;
auto& op1 = global_block.AllOps()[1]; auto& op1 = global_block.AllOps()[1];
auto shape1 = BOOST_GET_CONST(std::vector<int64_t>, op1->GetAttr("shape")); auto shape1 = PADDLE_GET_CONST(std::vector<int64_t>, op1->GetAttr("shape"));
shape1[0] = batch_size * 20; shape1[0] = batch_size * 20;
op1->SetAttr("shape", shape1); op1->SetAttr("shape", shape1);
auto& op2 = global_block.AllOps()[2]; auto& op2 = global_block.AllOps()[2];
auto shape2 = BOOST_GET_CONST(std::vector<int64_t>, op2->GetAttr("shape")); auto shape2 = PADDLE_GET_CONST(std::vector<int64_t>, op2->GetAttr("shape"));
shape2[0] = batch_size; shape2[0] = batch_size;
op2->SetAttr("shape", shape2); op2->SetAttr("shape", shape2);
auto& op3 = global_block.AllOps()[3]; auto& op3 = global_block.AllOps()[3];
auto shape3 = BOOST_GET_CONST(std::vector<int64_t>, op3->GetAttr("shape")); auto shape3 = PADDLE_GET_CONST(std::vector<int64_t>, op3->GetAttr("shape"));
shape3[0] = batch_size; shape3[0] = batch_size;
op3->SetAttr("shape", shape3); op3->SetAttr("shape", shape3);
return main_prog; return main_prog;
...@@ -228,7 +228,7 @@ void TestShareWorkQueue(const ProgramDesc& prog, ...@@ -228,7 +228,7 @@ void TestShareWorkQueue(const ProgramDesc& prog,
FetchList fetch_list = core->Run(feed_names, feed_tensors); FetchList fetch_list = core->Run(feed_names, feed_tensors);
for (size_t i = 0; i < fetch_list.size(); ++i) { for (size_t i = 0; i < fetch_list.size(); ++i) {
const float* fetch_data = const float* fetch_data =
BOOST_GET_CONST(LoDTensor, fetch_list[i]).data<float>(); PADDLE_GET_CONST(LoDTensor, fetch_list[i]).data<float>();
ASSERT_FLOAT_EQ(*fetch_data, fetch_results.at(i)); ASSERT_FLOAT_EQ(*fetch_data, fetch_results.at(i));
} }
}; };
......
[17 additional file diffs are collapsed and not shown.]