Unverified commit c4906360 authored by W Weilong Wu, committed by GitHub

support warnings for extra attrs and for axis with a non-default value (#50967)

* support warnings for extra attrs and for axis with a non-default value

* support extra_attr warnings and add test cases

* fix dict name

* polish

* polish

* polish
Parent 2d36c9a9
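For context, a minimal usage sketch of the feature this commit adds, modeled on the unit tests further down in the diff (the data name, tensor shape, and printed message are illustrative; only static-graph mode is exercised):

import os
import warnings

import paddle
from paddle.fluid.layer_helper import LayerHelper

paddle.enable_static()
os.environ['FLAGS_print_extra_attrs'] = '1'  # turn the new warnings on

with warnings.catch_warnings(record=True) as context:
    warnings.simplefilter("always")
    helper = LayerHelper("elementwise_add")
    data = paddle.static.data(
        name='data', shape=[None, 3, 32, 32], dtype='float32'
    )
    out = helper.create_variable_for_type_inference(dtype=data.dtype)
    # axis=1 differs from the default (-1), so a warning is expected
    helper.append_op(
        type="elementwise_add",
        inputs={'X': data, 'Y': data},
        outputs={'Out': out},
        attrs={'axis': 1, 'use_mkldnn': False},
    )
    print(context[-1].message)
    # op elementwise_add's attr axis = 1 is not the default value: -1

os.environ['FLAGS_print_extra_attrs'] = '0'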
......@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from collections import defaultdict
from collections.abc import Iterable
......@@ -127,6 +126,50 @@ _cuda_graph_enable_standalone_executor_ = os.environ.get(
'FLAGS_CUDA_GRAPH_USE_STANDALONE_EXECUTOR', 0
)
# special_op_attrs, extra_op_attrs are prepared for printing warnings
# when turning on FLAGS_print_extra_attrs
special_op_attrs = {
"elementwise_add": [{"axis": -1}],
"elementwise_sub": [{"axis": -1}],
"elementwise_mul": [{"axis": -1}],
"elementwise_div": [{"axis": -1}],
"elementwise_max": [{"axis": -1}],
"elementwise_min": [{"axis": -1}],
"elementwise_pow": [{"axis": -1}],
"elementwise_mod": [{"axis": -1}],
"elementwise_floordiv": [{"axis": -1}],
"less_than": [{"axis": -1}],
"less_equal": [{"axis": -1}],
"greater_than": [{"axis": -1}],
"greater_equal": [{"axis": -1}],
"equal": [{"axis": -1}],
"not_equal": [{"axis": -1}],
"amax": [{"reduce_all": False}],
"amin": [{"reduce_all": False}],
"any": [{"reduce_all": False}],
"frobenius_norm": [{"reduce_all": False}],
"logsumexp": [{"reduce_all": False}],
"reduce_max": [{"reduce_all": False}],
"reduce_min": [{"reduce_all": False}],
"reduce_mean": [{"reduce_all": False}],
"reduce_prod": [{"reduce_all": False}],
"reduce_sum": [{"reduce_all": False}],
}
extra_op_attrs = {
"gather": ["overwrite"],
"graph_reindex": ["flag_buffer_hashtable"],
"graph_sample_neighbors": ["flag_perm_buffer"],
"relu6": ["threshold"],
"swish": ["beta"],
"hsigmoid_loss": ["remote_prefetch"],
"max_pool2d_with_index": ["global_pooling"],
"uniform": ["diag_num"],
"unique": ["is_sorted"],
}
# Some explanation of our execution system 2022.03
# For now we have 3 kinds of execution system, since we refactored dygraph mode to
# build a fast execution system for dynamic mode. But we can't just remove all legacy
......@@ -3064,6 +3107,11 @@ class Operator:
attr_val = op_attrs[attr_name]
self._update_desc_attr(attr_name, attr_val)
for attr_name in extra_attrs_map.keys():
if os.environ.get('FLAGS_print_extra_attrs', '0') == '1':
warnings.warn(
"op %s use extra_attr: %s" % (type, attr_name)
)
if (attr_name not in op_attrs) or (
op_attrs[attr_name] is None
):
......@@ -3073,6 +3121,34 @@ class Operator:
else:
self._update_desc_attr(attr_name, op_attrs[attr_name])
if os.environ.get('FLAGS_print_extra_attrs', '0') == '1':
if type in extra_op_attrs:
attrs = extra_op_attrs.get(type, [])
for attr in attrs:
if attr in op_attrs.keys():
warnings.warn(
"op %s use extra_attr: %s" % (type, attr)
)
if type in special_op_attrs:
attrs = special_op_attrs.get(type, [])
for attr in attrs:
a_name = list(attr.keys())[0]
default_value = list(attr.values())[0]
if (
a_name in op_attrs.keys()
and default_value != op_attrs[a_name]
):
warnings.warn(
"op %s's attr %s = %s is not the default value: %s"
% (
type,
a_name,
op_attrs[a_name],
default_value,
)
)
# proto.attrs doesn't include ipu_index
if core.is_compiled_with_ipu():
if global_ipu_index >= 0:
......
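The framework.py hunk above can be read as the following standalone sketch (the check_op_attrs helper is hypothetical and the attribute tables are abbreviated to one entry each):

import os
import warnings

# Abbreviated copies of the tables defined above, for illustration only.
special_op_attrs = {"elementwise_add": [{"axis": -1}]}
extra_op_attrs = {"relu6": ["threshold"]}

def check_op_attrs(op_type, op_attrs):
    # Mimics the checks Operator performs when FLAGS_print_extra_attrs is '1'.
    if os.environ.get('FLAGS_print_extra_attrs', '0') != '1':
        return
    for attr in extra_op_attrs.get(op_type, []):
        if attr in op_attrs:
            warnings.warn("op %s use extra_attr: %s" % (op_type, attr))
    for attr in special_op_attrs.get(op_type, []):
        a_name, default_value = next(iter(attr.items()))
        if a_name in op_attrs and op_attrs[a_name] != default_value:
            warnings.warn(
                "op %s's attr %s = %s is not the default value: %s"
                % (op_type, a_name, op_attrs[a_name], default_value)
            )

os.environ['FLAGS_print_extra_attrs'] = '1'
check_op_attrs("elementwise_add", {"axis": 1})  # warns: not the default value
check_op_attrs("relu6", {"threshold": 6.0})     # warns: uses an extra attr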
......@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import warnings
import numpy as np
from op_test import OpTest, convert_float_to_uint16
......@@ -23,6 +25,7 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid import Program, program_guard
from paddle.fluid.layer_helper import LayerHelper
paddle.enable_static()
......@@ -2140,6 +2143,30 @@ class TestRelu6API(unittest.TestCase):
F.relu6(x_fp16)
class TestRelu6APIWarnings(unittest.TestCase):
def test_warnings(self):
with warnings.catch_warnings(record=True) as context:
warnings.simplefilter("always")
paddle.enable_static()
helper = LayerHelper("relu6")
data = paddle.static.data(
name='data', shape=[None, 3, 32, 32], dtype='float32'
)
out = helper.create_variable_for_type_inference(dtype=data.dtype)
os.environ['FLAGS_print_extra_attrs'] = "1"
helper.append_op(
type="relu6",
inputs={'X': data},
outputs={'Out': out},
attrs={'threshold': 6.0},
)
self.assertTrue(
"op relu6 use extra_attr: threshold" in str(context[-1].message)
)
os.environ['FLAGS_print_extra_attrs'] = "0"
def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
x_dtype = x.dtype
if x_dtype == 'float16':
......
......@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import warnings
import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
......@@ -20,6 +22,7 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.layer_helper import LayerHelper
class TestElementwiseAddOp(OpTest):
......@@ -866,6 +869,32 @@ class TestTensorAddNumpyScalar(unittest.TestCase):
self.assertTrue(c.dtype == core.VarDesc.VarType.FP16)
class TestTensorAddAPIWarnings(unittest.TestCase):
def test_warnings(self):
with warnings.catch_warnings(record=True) as context:
warnings.simplefilter("always")
paddle.enable_static()
helper = LayerHelper("elementwise_add")
data = paddle.static.data(
name='data', shape=[None, 3, 32, 32], dtype='float32'
)
out = helper.create_variable_for_type_inference(dtype=data.dtype)
os.environ['FLAGS_print_extra_attrs'] = "1"
helper.append_op(
type="elementwise_add",
inputs={'X': data, 'Y': data},
outputs={'Out': out},
attrs={'axis': 1, 'use_mkldnn': False},
)
self.assertTrue(
"op elementwise_add's attr axis = 1 is not the default value: -1"
in str(context[-1].message)
)
os.environ['FLAGS_print_extra_attrs'] = "0"
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -12,13 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import warnings
import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
from paddle.fluid.layer_helper import LayerHelper
class TestElementwiseOp(OpTest):
......@@ -615,6 +618,32 @@ class TestFloatElementwiseSubop1(unittest.TestCase):
paddle.enable_static()
class TestTensorSubAPIWarnings(unittest.TestCase):
def test_warnings(self):
with warnings.catch_warnings(record=True) as context:
warnings.simplefilter("always")
paddle.enable_static()
helper = LayerHelper("elementwise_sub")
data = paddle.static.data(
name='data', shape=[None, 3, 32, 32], dtype='float32'
)
out = helper.create_variable_for_type_inference(dtype=data.dtype)
os.environ['FLAGS_print_extra_attrs'] = "1"
helper.append_op(
type="elementwise_sub",
inputs={'X': data, 'Y': data},
outputs={'Out': out},
attrs={'axis': 1, 'use_mkldnn': False},
)
self.assertTrue(
"op elementwise_sub's attr axis = 1 is not the default value: -1"
in str(context[-1].message)
)
os.environ['FLAGS_print_extra_attrs'] = "0"
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -15,6 +15,7 @@
import os
import tempfile
import unittest
import warnings
import gradient_checker
import numpy as np
......@@ -30,6 +31,7 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.inference as paddle_infer
from paddle import enable_static
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.op import Operator
......@@ -751,6 +753,31 @@ class TestSumTripleGradCheck(unittest.TestCase):
self.func(p)
class TestSumAPIWarnings(unittest.TestCase):
def test_warnings(self):
with warnings.catch_warnings(record=True) as context:
warnings.simplefilter("always")
paddle.enable_static()
helper = LayerHelper("sum")
data = paddle.static.data(
name='data', shape=[32, 32], dtype='float32'
)
out = helper.create_variable_for_type_inference(dtype=data.dtype)
attrs = {'dim': [1], 'keep_dim': True, 'reduce_all': True}
os.environ["FLAGS_print_extra_attrs"] = '1'
helper.append_op(
type="reduce_sum",
inputs={'X': data},
outputs={'Out': out},
attrs=attrs,
)
self.assertTrue(
"op reduce_sum's attr reduce_all = True is not the default value: False"
in str(context[-1].message)
)
os.environ["FLAGS_print_extra_attrs"] = '0'
if __name__ == "__main__":
enable_static()
unittest.main()