未验证 提交 433e67bd 编写于 作者: N Nyakku Shigure 提交者: GitHub

[CodeStyle][py2] remove unnecessary u-prefix in string literal (#47727)

* [CodeStyle][py2] remove unnecessary u-prefix in string literal

* `"{}".format(x)` -> `x`

* remove duplicated dtype literals

* revert changes in data_feeder.py

* remove u-prefix in data_feeder

* revert remove duplicated dtype literals in data_feeder

* remove unnecessary convert to str

* for test

* add some comments

* refine comment

* restore a removed str conversion

* re-trigger all ci, empty commit
上级 18d33346
......@@ -60,7 +60,7 @@ class TestMovingAverageAbsMaxScaleOp(unittest.TestCase):
moving_average_abs_max_scale_ops = [
op
for op in main_program.blocks[0].ops
if op.type == u'moving_average_abs_max_scale'
if op.type == 'moving_average_abs_max_scale'
]
assert (
len(moving_average_abs_max_scale_ops) == 1
......
......@@ -50,6 +50,7 @@ def convert_dtype(dtype):
if dtype in _PADDLE_DTYPE_2_NUMPY_DTYPE:
return _PADDLE_DTYPE_2_NUMPY_DTYPE[dtype]
elif isinstance(dtype, type):
# This branch is for NumPy scalar types
if dtype in [
bool,
np.float16,
......@@ -66,6 +67,7 @@ def convert_dtype(dtype):
]:
return dtype.__name__
else:
# This branch is for np.dtype and str
if dtype in [
'bool',
'float16',
......@@ -79,24 +81,10 @@ def convert_dtype(dtype):
'uint8',
'complex64',
'complex128',
u'bool',
u'float16',
u'uint16',
u'float32',
u'float64',
u'int8',
u'int16',
u'int32',
u'int64',
u'uint8',
u'complex64',
u'complex128',
]:
# this code is a little bit dangerous, since error could happen
# when casting no-ascii code to str in python2.
# but since the set itself is limited, so currently, it is good.
# however, jointly supporting python2 and python3, (as well as python4 maybe)
# may still be a long-lasting problem.
# NOTE(SigureMo): Since the np.dtype object is not an instance of
# type, it will not be handled by the previous branch. We need
# to convert it to str here.
return str(dtype)
# NOTE(zhangbo): Currently numpy does not support bfloat16; paddle uses uint16 to represent bfloat16, and their binary representations are consistent.
if dtype in ['bfloat16']:
......@@ -104,7 +92,8 @@ def convert_dtype(dtype):
raise TypeError(
"dtype must be any of [bool, float16, uint16, float32, float64, int8, int16, "
"int32, int64, uint8, complex64, complex128], but received %s" % dtype
"int32, int64, uint8, complex64, complex128, bfloat16], but received %s"
% dtype
)
......
......@@ -188,24 +188,24 @@ class SimpleNet(BackwardNet):
super().__init__()
self.stop_gradient_grad_vars = set(
[
u'x_no_grad@GRAD',
u'x2_no_grad@GRAD',
u'x3_no_grad@GRAD',
u'label_no_grad@GRAD',
'x_no_grad@GRAD',
'x2_no_grad@GRAD',
'x3_no_grad@GRAD',
'label_no_grad@GRAD',
]
)
self.no_grad_vars = set()
self.params_names = set([u'w2v', u'fc_predict.b_0', u'fc_w'])
self.params_names = set(['w2v', 'fc_predict.b_0', 'fc_w'])
self.op_path = [
u'lookup_table_v2',
u'lookup_table_v2', # embedding
u'elementwise_add', # merge
u'mul',
u'elementwise_add',
u'softmax', # fc
u'elementwise_sub',
u'square',
u'reduce_mean',
'lookup_table_v2',
'lookup_table_v2', # embedding
'elementwise_add', # merge
'mul',
'elementwise_add',
'softmax', # fc
'elementwise_sub',
'square',
'reduce_mean',
] # loss
self.shape = [16, 50]
......
......@@ -1471,9 +1471,9 @@ class TestRemoteHsigmoid(TestDistLookupTableBase):
for name in ["epmap", "table_names", "epmap"]:
assert op.has_attr(name)
if name == "epmap":
assert op.attr(name)[0] == u'127.0.0.1:6174'
assert op.attr(name)[0] == '127.0.0.1:6174'
elif name == "table_names":
assert op.attr(name)[0] == u'hierarchical_sigmoid_0.w_0'
assert op.attr(name)[0] == 'hierarchical_sigmoid_0.w_0'
else:
assert op.attr(name) == 3
elif op.type == "lookup_table":
......@@ -1484,7 +1484,7 @@ class TestRemoteHsigmoid(TestDistLookupTableBase):
for op in trainer.blocks[0].ops:
if op.type == "recv":
assert len(op.output("Out")) == 1
assert op.output("Out")[0] == u'hierarchical_sigmoid_0.b_0'
assert op.output("Out")[0] == 'hierarchical_sigmoid_0.b_0'
op_count += 1
assert op_count == 1
......
......@@ -106,7 +106,7 @@ class TestFeedData(unittest.TestCase):
"The fed Variable %r should have dimensions = %r, "
"shape = %r, but received fed shape %r on each device"
% (
u'data',
'data',
len(in_shape_tuple),
in_shape_tuple,
error_shape_list,
......@@ -120,7 +120,7 @@ class TestFeedData(unittest.TestCase):
self.assertEqual(
str(dtype_mismatch_err.exception),
"The data type of fed Variable %r must be 'int64', but "
"received 'float64'" % (u'label'),
"received 'float64'" % ('label'),
)
def _test_feed_data_dtype_mismatch(self, use_cuda, use_parallel_executor):
......
......@@ -125,7 +125,7 @@ class TestPrintOpBackward(unittest.TestCase):
loss = paddle.static.Print(loss)
paddle.optimizer.Adam().minimize(loss)
print_ops = [op for op in main.blocks[0].ops if op.type == u'print']
print_ops = [op for op in main.blocks[0].ops if op.type == 'print']
assert len(print_ops) == 2, "The number of print op should be 2"
place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
......
......@@ -40,6 +40,7 @@ def set_default_dtype(d):
"""
if isinstance(d, type):
# This branch is for NumPy scalar types
if d in [np.float16, np.float32, np.float64]:
d = d.__name__
else:
......@@ -48,19 +49,11 @@ def set_default_dtype(d):
", but received %s" % d.__name__
)
else:
if d in [
'float16',
'float32',
'float64',
u'float16',
u'float32',
u'float64',
]:
# this code is a little bit dangerous, since error could happen
# when casting no-ascii code to str in python2.
# but since the set itself is limited, so currently, it is good.
# however, jointly supporting python2 and python3, (as well as python4 maybe)
# may still be a long-lasting problem.
# This branch is for np.dtype and str
if d in ['float16', 'float32', 'float64']:
# NOTE(SigureMo): Since the np.dtype object is not an instance of
# type, it will not be handled by the previous branch. We need
# to convert it to str here.
d = str(d)
else:
raise TypeError(
......
......@@ -1357,14 +1357,6 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
'int64',
'complex64',
'complex128',
u'bool',
u'float16',
u'float32',
u'float64',
u'int32',
u'int64',
u'complex64',
u'complex128',
],
'sum',
)
......
......@@ -90,7 +90,7 @@ def prune_phi_kernels():
content = content.replace(p, '')
with open(op_file, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(content))
f.write(content)
print('We erase all grad op and kernel for Paddle-Inference lib.')
print('%50s%10s' % ('type', 'count'))
......@@ -137,7 +137,7 @@ def append_fluid_kernels():
return False
with open(file_name, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(new_content))
f.write(new_content)
# 2. add op and kernel register
op_white_list.append("tensorrt_engine")
......@@ -170,7 +170,7 @@ def append_fluid_kernels():
matches[0], matches[0].replace(k, k + "__")
)
with open(op_file, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(content))
f.write(content)
return True
......
......@@ -170,7 +170,7 @@ if __name__ == '__main__':
content = content.replace(i, '')
with open(op_file, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(content))
f.write(content)
# 2. update operators/CMakeLists.txt
cmake_file = os.path.join(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册