Unverified · Commit 90cf2299 authored by zlsh80826, committed by GitHub

Fix numpy 1.20+ deprecation warnings (#42929)

* Replace np.bool/np.bool8 with np.bool_

* Replace np.object with np.object_

* Replace np.complex with np.complex128

* Replace np.float with np.float64

* Replace np.int with np.int_

* Rerun pre-commit for newer pre-commit configuration

* Use builtin bool instead of np.bool_ based on the context
Parent a0363d18
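
Background for the replacements listed above (an illustrative sketch, not part of the commit): since NumPy 1.20, np.bool, np.int, np.float, np.complex and np.object have been deprecated aliases for the Python builtins, emitting a DeprecationWarning on every use; NumPy 1.24 removes them outright. The substitution pattern applied throughout the diff below is:

    import numpy as np

    # Deprecated alias: warns on NumPy 1.20+, AttributeError on 1.24+.
    #   flag = np.array([0]).astype(np.bool)
    # Replacement: the canonical NumPy scalar types, as used in this commit.
    flag = np.array([0]).astype(np.bool_)              # dtype('bool')
    grad = np.zeros((2, 2), dtype=np.complex128)       # what np.complex meant as a dtype
    quant = np.round(np.array([1.5])).astype(np.int_)  # platform default integer
    mag = np.abs(np.array([-1.0])).astype(np.float64)

    # Where a Python type rather than a NumPy dtype is intended
    # (e.g. isinstance checks), the builtin is the correct replacement:
    padding = 1
    assert isinstance(padding, int)
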
@@ -1811,8 +1811,8 @@ class Fleet(object):
                 if (param._grad_ivar() is not None) and (
                     param._grad_ivar().dtype == core.VarDesc.VarType.FP32)
             ]
-            temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool))
-            temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool))
+            temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool_))
+            temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_))
             if len(param_grads_fp16):
                 _C_ops.check_finite_and_unscale(param_grads_fp16, self._scale,
                                                 param_grads_fp16,
...
@@ -200,8 +200,8 @@ def GroupShardedScaler(scaler):
                 else:
                     param_grads_fp32.append(param.grad)

-        temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool))
-        temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool))
+        temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool_))
+        temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_))
        device = "cpu" if optimizer.offload else "gpu"
        dev_id = 0 if device == "cpu" else int(
...
@@ -201,8 +201,8 @@ def ShardingScaler(scaler):
                 else:
                     param_grads_fp32.append(param.grad)

-        temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool))
-        temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool))
+        temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool_))
+        temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_))
        device = "cpu" if optimizer.offload else "gpu"
        dev_id = 0 if device == "cpu" else int(
...
@@ -49,7 +49,7 @@ def convert_dtype(dtype):
         return _PADDLE_DTYPE_2_NUMPY_DTYPE[dtype]
     elif isinstance(dtype, type):
         if dtype in [
-            np.bool, np.float16, np.uint16, np.float32, np.float64, np.int8,
+            bool, np.float16, np.uint16, np.float32, np.float64, np.int8,
             np.int16, np.int32, np.int64, np.uint8, np.complex64,
             np.complex128
         ]:
...
@@ -129,11 +129,11 @@ class AmpScaler(object):
         self._decr_count = 0
         self._use_dynamic_loss_scaling = use_dynamic_loss_scaling

-        self._found_inf = to_variable(np.array([0]).astype(np.bool))
+        self._found_inf = to_variable(np.array([0]).astype(np.bool_))
         self._temp_found_inf_fp16 = to_variable(
-            np.array([0]).astype(np.bool))
+            np.array([0]).astype(np.bool_))
         self._temp_found_inf_fp32 = to_variable(
-            np.array([0]).astype(np.bool))
+            np.array([0]).astype(np.bool_))
         self._scale = to_variable(
             np.array([self._init_loss_scaling]).astype(np.float32))
         self._cache_founf_inf = None
...
@@ -483,7 +483,7 @@ def _as_lodtensor(data, place, dtype=None):
         data = np.array([data]).astype(dtype)
     elif isinstance(data, (list, tuple)):
         data = np.array(data)
-        if data.dtype == np.object:
+        if data.dtype == np.object_:
             raise TypeError(
                 "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
                 "this means the input data contains nested lists with different lengths. "
...
@@ -1109,7 +1109,7 @@ def convert_np_dtype_to_dtype_(np_dtype):
         return core.VarDesc.VarType.INT16
     elif dtype == np.int64:
         return core.VarDesc.VarType.INT64
-    elif dtype == np.bool:
+    elif dtype == np.bool_:
         return core.VarDesc.VarType.BOOL
     elif dtype == np.uint16:
         # since there is still no support for bfloat16 in NumPy,
...
@@ -12860,8 +12860,8 @@ def logical_or(x, y, out=None, name=None):
             import paddle
             import numpy as np

-            x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
-            y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
+            x_data = np.array([True, False], dtype=np.bool_).reshape(2, 1)
+            y_data = np.array([True, False, True, False], dtype=np.bool_).reshape(2, 2)
             x = paddle.to_tensor(x_data)
             y = paddle.to_tensor(y_data)
             res = paddle.logical_or(x, y)
@@ -12905,8 +12905,8 @@ def logical_xor(x, y, out=None, name=None):
             import paddle
             import numpy as np

-            x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
-            y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
+            x_data = np.array([True, False], dtype=np.bool_).reshape([2, 1])
+            y_data = np.array([True, False, True, False], dtype=np.bool_).reshape([2, 2])
             x = paddle.to_tensor(x_data)
             y = paddle.to_tensor(y_data)
             res = paddle.logical_xor(x, y)
...
@@ -145,7 +145,7 @@ class DataLoaderBase(object):
     @classmethod
     def _check_input_array(cls, item):
         arr = np.asarray(item)
-        if arr.dtype == np.object:
+        if arr.dtype == np.object_:
             raise TypeError(
                 "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
                 "this means the input data contains nested lists with different lengths. "
...
@@ -44,8 +44,8 @@ def rand_x(dims=1,
            complex=False):
     shape = [np.random.randint(min_dim_len, max_dim_len) for i in range(dims)]
     if complex:
-        return np.random.randn(*shape).astype(
-            dtype) + 1.j * np.random.randn(*shape).astype(dtype)
+        return np.random.randn(
+            *shape).astype(dtype) + 1.j * np.random.randn(*shape).astype(dtype)
     else:
         return np.random.randn(*shape).astype(dtype)
@@ -473,7 +473,7 @@ class TestIrfft2(unittest.TestCase):
 @parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [
     ('test_bool_input',
      (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(
-         np.bool8), None, -1, 'backward', NotImplementedError),
+         np.bool_), None, -1, 'backward', NotImplementedError),
     ('test_n_nagative', np.random.randn(4, 4, 4) +
      1j * np.random.randn(4, 4, 4), -1, -1, 'backward', ValueError),
     ('test_n_zero', np.random.randn(4, 4) + 1j * np.random.randn(4, 4), 0, -1,
@@ -543,7 +543,7 @@ class TestIrfftException(unittest.TestCase):
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [('test_bool_input',
       (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(
-          np.bool8), None, (-2, -1), 'backward', NotImplementedError),
+          np.bool_), None, (-2, -1), 'backward', NotImplementedError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
       (-2, -1), 'backward', ValueError),
@@ -625,7 +625,7 @@ class TestIrfft2Exception(unittest.TestCase):
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [('test_bool_input',
       (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(
-          np.bool8), None, (-2, -1), 'backward', NotImplementedError),
+          np.bool_), None, (-2, -1), 'backward', NotImplementedError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
       (-2, -1), 'backward', ValueError),
...
@@ -370,7 +370,7 @@ class TestIrfft2(unittest.TestCase):
         4), None, -1, 'backward', TypeError),
     ('test_bool_input',
      (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(
-         np.bool8), None, -1, 'backward', TypeError),
+         np.bool_), None, -1, 'backward', TypeError),
     ('test_n_nagative', np.random.randn(4, 4, 4) +
      1j * np.random.randn(4, 4, 4), -1, -1, 'backward', ValueError),
     ('test_n_zero', np.random.randn(4, 4) + 1j * np.random.randn(4, 4), 0, -1,
@@ -406,7 +406,7 @@ class TestHfftException(unittest.TestCase):
         4), None, -1, 'backward', TypeError),
     ('test_bool_input',
      (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(
-         np.bool8), None, -1, 'backward', TypeError),
+         np.bool_), None, -1, 'backward', TypeError),
     ('test_n_nagative', np.random.randn(4, 4, 4) +
      1j * np.random.randn(4, 4, 4), -1, -1, 'backward', ValueError),
     ('test_n_zero', np.random.randn(4, 4) + 1j * np.random.randn(4, 4), 0, -1,
@@ -444,7 +444,7 @@ class TestIrfftException(unittest.TestCase):
         4, 4, 4), None, None, 'backward', TypeError),
     ('test_bool_input',
      (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(
-         np.bool8), None, (-2, -1), 'backward', TypeError),
+         np.bool_), None, (-2, -1), 'backward', TypeError),
     ('test_n_nagative',
      np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
      (-2, -1), 'backward', ValueError),
@@ -485,7 +485,7 @@ class TestHfft2Exception(unittest.TestCase):
         4, 4, 4), None, None, 'backward', TypeError),
     ('test_bool_input',
      (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(
-         np.bool8), None, (-2, -1), 'backward', TypeError),
+         np.bool_), None, (-2, -1), 'backward', TypeError),
     ('test_n_nagative',
      np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
      (-2, -1), 'backward', ValueError),
@@ -526,7 +526,7 @@ class TestIrfft2Exception(unittest.TestCase):
         4, 4, 4), None, None, 'backward', TypeError),
     ('test_bool_input',
      (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(
-         np.bool8), None, (-2, -1), 'backward', TypeError),
+         np.bool_), None, (-2, -1), 'backward', TypeError),
     ('test_n_nagative',
      np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
      (-2, -1), 'backward', ValueError),
@@ -568,7 +568,7 @@ class TestHfftnException(unittest.TestCase):
         4, 4, 4), None, None, 'backward', TypeError),
     # ('test_bool_input',
     #  (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
-    #  ).astype(np.bool8), None, (-2, -1), 'backward', ValueError),
+    #  ).astype(np.bool_), None, (-2, -1), 'backward', ValueError),
     ('test_n_nagative',
      np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
      (-2, -1), 'backward', ValueError),
...
@@ -86,7 +86,7 @@ class TestAssignBoolValue(TestBase):
         self.feed_fp32 = {'in_0': data.astype(np.float32)}
         self.feed_fp16 = {'in_0': data.astype(np.float16)}
         data = np.random.choice([True, False], size=(2, 3, 1))
-        self.assign_bool = data.astype(np.bool)
+        self.assign_bool = data.astype(np.bool_)

     @IPUOpTest.static_graph
     def build_model(self):
...
@@ -64,7 +64,7 @@ class TestMKLDNNMulOpS8S8(OpTest):
         B_data = np.random.uniform(-127, 127, (5, 20)).astype(np.float32)

-        quant_B = np.round(B_data * self.scale_y[0]).astype(np.int)
+        quant_B = np.round(B_data * self.scale_y[0]).astype(np.int_)
         output = np.dot(A_data, quant_B)

         scale_output_shift = (self.scale_out) / \
@@ -136,7 +136,7 @@ class TestMKLDNNMulOpS8S8WithFlatten(TestMKLDNNMulOpS8S8):
         A_data_reshape = A_data.reshape(3 * 4, 4 * 3)
         B_data_reshape = B_data.reshape(2 * 6, 1 * 2 * 3)

-        quant_B = np.round(B_data_reshape * self.scale_y[0]).astype(np.int)
+        quant_B = np.round(B_data_reshape * self.scale_y[0]).astype(np.int_)
         output = np.dot(A_data_reshape, quant_B)

         scale_output_shift = (self.scale_out) / \
...
@@ -47,7 +47,7 @@ class TestSoftmaxMKLDNNOp(TestSoftmaxOp):
         self.shape = self.get_x_shape()
         self.axis = self.get_axis()

-        x = np.random.uniform(0.1, 1, self.shape).astype(np.float)
+        x = np.random.uniform(0.1, 1, self.shape).astype(np.float64)
         out = convert_float_to_uint16(
             np.apply_along_axis(stable_softmax, self.axis, x))
...
@@ -193,7 +193,7 @@ class TestBoxCoderOp(OpTest):
         }
         if self.use_variance:
             self.attrs['variance'] = self.prior_box_var.astype(
-                np.float).flatten()
+                np.float64).flatten()
         if self.axis != 0:
             self.attrs['axis'] = self.axis
...
@@ -84,7 +84,7 @@ class TestNpuDensityPriorBoxOp(OpTest):
         self.batch_size = 10

         self.variances = [0.1, 0.1, 0.2, 0.2]
-        self.variances = np.array(self.variances, dtype=np.float).flatten()
+        self.variances = np.array(self.variances, dtype=np.float64).flatten()

         self.clip = True
         self.num_priors = 0
...
@@ -48,7 +48,7 @@ class TestFillZerosLikeOp(OpTest):
 class TestFillZerosLikeOpBool(TestFillZerosLikeOp):

     def init_dtype(self):
-        self.dtype = np.bool
+        self.dtype = np.bool_


 class TestFillZerosLikeOpFp16(TestFillZerosLikeOp):
...
@@ -95,9 +95,9 @@ class TestNPUPriorBox(OpTest):
         self.set_min_max_aspect_ratios_order()
         self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0]
         self.aspect_ratios = np.array(self.aspect_ratios,
-                                      dtype=np.float).flatten()
+                                      dtype=np.float64).flatten()
         self.variances = [0.1, 0.1, 0.2, 0.2]
-        self.variances = np.array(self.variances, dtype=np.float).flatten()
+        self.variances = np.array(self.variances, dtype=np.float64).flatten()

         self.clip = True
         self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes)
...
@@ -106,7 +106,7 @@ class TestReduceMaxOpWithOutDtype_bool(TestNPUReduceMaxOp):
         }
         self.outputs = {
             'Out':
-            self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype(np.bool)
+            self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype(np.bool_)
         }
...
@@ -106,7 +106,7 @@ class TestReduceMinOpWithOutDtype_bool(TestNPUReduceMinOp):
         }
         self.outputs = {
             'Out':
-            self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype(np.bool)
+            self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype(np.bool_)
         }
...
@@ -129,7 +129,8 @@ class TestNPUReduceProdWithOutDtype_bool(TestNPUReduceProd):
         self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.BOOL)}
         self.outputs = {
             'Out':
-            self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype(np.bool)
+            self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype(
+                np.bool_)
         }
...
@@ -72,7 +72,7 @@ class TestSizeOp4(TestSizeOp):
     def config(self):
         self.shape = [2**10]
-        self.dtype = np.bool
+        self.dtype = np.bool_


 class TestSizeOp5(TestSizeOp):
...
@@ -199,7 +199,7 @@ class TestNPUTrilTriu_bool(TestNPUTrilTriu):
         self.check_output_with_place(self.place)

     def init_dtype(self):
-        self.dtype = np.bool
+        self.dtype = np.bool_

     def initTestCase(self):
         self.real_op_type = np.random.choice(['triu', 'tril'])
...
@@ -37,7 +37,7 @@ class TestUpdateLossScalingOpMinLossScalingBad(TestUpdateLossScalingOpBad):
         self.init()
         fluid.core.globals()['FLAGS_min_loss_scaling'] = 1639
-        found_inf = np.array([True], dtype=np.bool)
+        found_inf = np.array([True], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
         i = np.random.randint(0, 1024, 1)
         j = np.random.randint(0, 1024, 1)
...
@@ -34,7 +34,7 @@ class TestUpdateLossScalingOp(OpTest):
         self.place = paddle.NPUPlace(0)
         self.init()
-        found_inf = np.array([False], dtype=np.bool)
+        found_inf = np.array([False], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)

         self.inputs = {
@@ -82,7 +82,7 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
         self.place = paddle.NPUPlace(0)
         self.init()
-        found_inf = np.array([True], dtype=np.bool)
+        found_inf = np.array([True], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
         i = np.random.randint(0, 1024, 1)
         j = np.random.randint(0, 1024, 1)
...
@@ -471,7 +471,7 @@ class OpTest(unittest.TestCase):
             np.dtype(np.int16),
             np.dtype(np.int8),
             np.dtype(np.uint8),
-            np.dtype(np.bool)
+            np.dtype(np.bool_)
         ]
         # check the dtype in dtype_list in order, select the first dtype that in dtype_set
         for dtype in dtype_list:
...
@@ -145,7 +145,7 @@ class TestAssignOApi(unittest.TestCase):
     def test_assign_NumpyArray(self):
         with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.bool)
+            array = np.random.random(size=(100, 10)).astype(np.bool_)
             result1 = paddle.zeros(shape=[3, 3], dtype='float32')
             paddle.assign(array, result1)
         self.assertTrue(np.allclose(result1.numpy(), array))
...
@@ -35,7 +35,7 @@ def bipartite_match(distance, match_indices, match_dist):
     match_sorted = sorted(match_pair, key=lambda tup: tup[2], reverse=True)

-    row_indices = -1 * np.ones((row, ), dtype=np.int)
+    row_indices = -1 * np.ones((row, ), dtype=np.int_)

     idx = 0
     for i, j, dist in match_sorted:
@@ -69,7 +69,7 @@ def batch_bipartite_match(distance, lod, match_type=None, dist_threshold=None):
     """
     n = len(lod)
     m = distance.shape[1]
-    match_indices = -1 * np.ones((n, m), dtype=np.int)
+    match_indices = -1 * np.ones((n, m), dtype=np.int_)
     match_dist = np.zeros((n, m), dtype=np.float32)
     cur_offset = 0
     for i in range(n):
...
@@ -235,7 +235,7 @@ class TestBoxCoderOpWithVariance(OpTest):
         self.attrs = {
             'code_type': 'decode_center_size',
             'box_normalized': False,
-            'variance': prior_box_var.astype(np.float).flatten(),
+            'variance': prior_box_var.astype(np.float64).flatten(),
             'axis': axis
         }
         self.outputs = {'OutputBox': output_box}
...
@@ -249,8 +249,8 @@ def create_paddle_case(op_type, callback):
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
             exe = paddle.static.Executor(self.place)
-            input_x = np.array([True, False, True]).astype(np.bool)
-            input_y = np.array([True, True, False]).astype(np.bool)
+            input_x = np.array([True, False, True]).astype(np.bool_)
+            input_y = np.array([True, True, False]).astype(np.bool_)
             real_result = callback(input_x, input_y)
             res, = exe.run(feed={
                 "x": input_x,
@@ -267,8 +267,8 @@ def create_paddle_case(op_type, callback):
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
             exe = paddle.static.Executor(self.place)
-            input_x = np.array([True, False, True]).astype(np.bool)
-            input_y = np.array([True]).astype(np.bool)
+            input_x = np.array([True, False, True]).astype(np.bool_)
+            input_y = np.array([True]).astype(np.bool_)
             real_result = callback(input_x, input_y)
             res, = exe.run(feed={
                 "x": input_x,
...
@@ -70,7 +70,7 @@ class TestDensityPriorBoxOp(OpTest):
         self.batch_size = 10

         self.variances = [0.1, 0.1, 0.2, 0.2]
-        self.variances = np.array(self.variances, dtype=np.float).flatten()
+        self.variances = np.array(self.variances, dtype=np.float64).flatten()

         self.clip = True
         self.num_priors = 0
...
@@ -27,7 +27,7 @@ def compare(ref, res, atol, rtol):
     ref = np.array(ref).flatten()
     res = np.array(res).flatten()

-    tmp_ref = ref.astype(np.float)
+    tmp_ref = ref.astype(np.float64)
     tol = atol + rtol * abs(tmp_ref)
     diff = abs(res - ref)
...
@@ -164,7 +164,7 @@ class TestFusedMultiTransformerOp(OpTest):
                 self.attn_mask = (self.attn_mask - 1.0) * 1e4
             else:
                 self.attn_mask = (np.tril(self.attn_mask) - 1.0) * 1e4
-        elif self.attn_mask_type == np.bool:
+        elif self.attn_mask_type == np.bool_:
             if self.has_cache_kv and not self.gen_cache_kv:
                 self.attn_mask[:, :, :, -2] = 0
             else:
@@ -395,7 +395,7 @@ class TestFusedMultiTransformerOp(OpTest):
         epsilon = 1e-05
         ln2_epsilon = 1e-05

-        if attn_mask is not None and self.attn_mask_type != np.bool:
+        if attn_mask is not None and self.attn_mask_type != np.bool_:
             attn_mask = _convert_attention_mask(attn_mask, x.dtype)

         qkv_weights, qkv_biases = [], []
...
@@ -79,8 +79,8 @@ def poly2mask(xy, k, h, w):
             u.extend([int(xs + s * t + .5) for t in ts])

     k = len(u)
-    x = np.zeros((k), np.int)
-    y = np.zeros((k), np.int)
+    x = np.zeros((k), np.int_)
+    y = np.zeros((k), np.int_)
     m = 0
     for j in six.moves.xrange(1, k):
         if u[j] != u[j - 1]:
@@ -116,7 +116,7 @@ def poly2mask(xy, k, h, w):
             b[m - 1] += a[j]
             j += 1
     mask = decode(b, m)
-    mask = np.array(mask, dtype=np.int).reshape((w, h))
+    mask = np.array(mask, dtype=np.int_).reshape((w, h))
     mask = mask.transpose((1, 0))
     return mask
...
@@ -151,7 +151,7 @@ class TestComplexKronOp(OpTest):
         self.grad_y = self.get_grad_y_by_numpy()

     def get_grad_x_by_numpy(self):
-        grad_x = np.zeros(self.x_shape, np.complex)
+        grad_x = np.zeros(self.x_shape, np.complex128)
         for x_i in range(self.x_shape[0]):
             for x_j in range(self.x_shape[1]):
                 for i in range(self.y_shape[0]):
@@ -163,7 +163,7 @@ class TestComplexKronOp(OpTest):
         return grad_x

     def get_grad_y_by_numpy(self):
-        grad_y = np.zeros(self.y_shape, np.complex)
+        grad_y = np.zeros(self.y_shape, np.complex128)
         for y_i in range(self.y_shape[0]):
             for y_j in range(self.y_shape[1]):
                 for x_i in range(self.x_shape[0]):
...
@@ -41,7 +41,7 @@ class TestOnesLikeAPI(unittest.TestCase):
             # 'bool', 'float32', 'float64', 'int32', 'int64'
             out1 = ones_like(x)
-            out2 = ones_like(x, np.bool)
+            out2 = ones_like(x, np.bool_)
             out3 = ones_like(x, 'float64')
             out4 = ones_like(x, 'int32')
             out5 = ones_like(x, 'int64')
@@ -54,7 +54,7 @@ class TestOnesLikeAPI(unittest.TestCase):
                        fetch_list=[out1, out2, out3, out4, out5])
         for i, dtype in enumerate(
-                [np.float32, np.bool, np.float64, np.int32, np.int64]):
+                [np.float32, np.bool_, np.float64, np.int32, np.int64]):
             self.assertEqual(outs[i].dtype, dtype)
             self.assertEqual((outs[i] == np.ones(shape, dtype)).all(), True)
@@ -67,7 +67,7 @@ class TestOnesLikeImpeartive(unittest.TestCase):
             0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
         paddle.disable_static(place)
         x = paddle.to_tensor(np.ones(shape))
-        for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
+        for dtype in [np.bool_, np.float32, np.float64, np.int32, np.int64]:
             out = ones_like(x, dtype)
             self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True)
...
@@ -81,9 +81,9 @@ class TestPriorBoxOp(OpTest):
         self.set_min_max_aspect_ratios_order()
         self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0]
         self.aspect_ratios = np.array(self.aspect_ratios,
-                                      dtype=np.float).flatten()
+                                      dtype=np.float64).flatten()
         self.variances = [0.1, 0.1, 0.2, 0.2]
-        self.variances = np.array(self.variances, dtype=np.float).flatten()
+        self.variances = np.array(self.variances, dtype=np.float64).flatten()

         self.clip = True
         self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes)
...
@@ -965,7 +965,7 @@ class TestAllAPI(unittest.TestCase):
         paddle.disable_static()
         for place in self.places:
             with fluid.dygraph.guard(place):
-                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
+                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                 x = fluid.layers.assign(np_x)
                 x = fluid.layers.cast(x, 'bool')
@@ -1021,7 +1021,7 @@ class TestAnyAPI(unittest.TestCase):
         paddle.disable_static()
         for place in self.places:
             with fluid.dygraph.guard(place):
-                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
+                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                 x = fluid.layers.assign(np_x)
                 x = fluid.layers.cast(x, 'bool')
...
@@ -81,7 +81,7 @@ def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None):
         raise Exception("Input must be finite")

     # All norms only depend on magnitude, let's do that first
-    mag = np.abs(S).astype(np.float)
+    mag = np.abs(S).astype(np.float64)

     # For max/min norms, filling with 1 works
     fill_norm = 1
@@ -598,8 +598,8 @@ def rand_x(dims=1,
         np.random.randint(min_dim_len, max_dim_len) for i in range(dims)
     ]
     if complex:
-        return np.random.randn(*shape).astype(
-            dtype) + 1.j * np.random.randn(*shape).astype(dtype)
+        return np.random.randn(
+            *shape).astype(dtype) + 1.j * np.random.randn(*shape).astype(dtype)
     else:
         return np.random.randn(*shape).astype(dtype)
...
@@ -24,7 +24,7 @@ class TestUpdateLossScalingOp(OpTest):
     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
-        found_inf = np.array([False], dtype=np.bool)
+        found_inf = np.array([False], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)

         self.inputs = {
@@ -66,7 +66,7 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
-        found_inf = np.array([True], dtype=np.bool)
+        found_inf = np.array([True], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
         i = np.random.randint(0, 1024, 1)
         j = np.random.randint(0, 1024, 1)
...
@@ -43,7 +43,7 @@ class TestZerosLikeAPI(unittest.TestCase):
         with program_guard(train_program, startup_program):
             x = paddle.fluid.data('X', shape)
             out1 = zeros_like(x)
-            out2 = zeros_like(x, np.bool)
+            out2 = zeros_like(x, np.bool_)
             out3 = zeros_like(x, 'float64')
             out4 = zeros_like(x, 'int32')
             out5 = zeros_like(x, 'int64')
@@ -54,7 +54,7 @@ class TestZerosLikeAPI(unittest.TestCase):
                        feed={'X': np.ones(shape).astype('float32')},
                        fetch_list=[out1, out2, out3, out4, out5])
         for (i, dtype) in enumerate(
-                [np.float32, np.bool, np.float64, np.int32, np.int64]):
+                [np.float32, np.bool_, np.float64, np.int32, np.int64]):
             self.assertEqual(outs[i].dtype, dtype)
             self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True)
@@ -71,7 +71,7 @@ class TestZerosLikeImpeartive(unittest.TestCase):
                  if core.is_compiled_with_cuda() else fluid.CPUPlace())
         paddle.disable_static(place)
         x = paddle.to_tensor(np.ones(shape))
-        for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
+        for dtype in [np.bool_, np.float32, np.float64, np.int32, np.int64]:
             out = zeros_like(x, dtype)
             self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
                              True)
...
@@ -240,8 +240,8 @@ def create_paddle_case(op_type, callback):
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
             exe = paddle.static.Executor(self.place)
-            input_x = np.array([True, False, True]).astype(np.bool)
-            input_y = np.array([True, True, False]).astype(np.bool)
+            input_x = np.array([True, False, True]).astype(np.bool_)
+            input_y = np.array([True, True, False]).astype(np.bool_)
             real_result = callback(input_x, input_y)
             res, = exe.run(feed={
                 "x": input_x,
@@ -258,8 +258,8 @@ def create_paddle_case(op_type, callback):
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
             exe = paddle.static.Executor(self.place)
-            input_x = np.array([True, False, True]).astype(np.bool)
-            input_y = np.array([True]).astype(np.bool)
+            input_x = np.array([True, False, True]).astype(np.bool_)
+            input_y = np.array([True]).astype(np.bool_)
             real_result = callback(input_x, input_y)
             res, = exe.run(feed={
                 "x": input_x,
...
@@ -98,9 +98,10 @@ class XPUTestPriorBoxOp(XPUOpTestWrapper):
             self.set_min_max_aspect_ratios_order()
             self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0]
             self.aspect_ratios = np.array(self.aspect_ratios,
-                                          dtype=np.float).flatten()
+                                          dtype=np.float64).flatten()
             self.variances = [0.1, 0.1, 0.2, 0.2]
-            self.variances = np.array(self.variances, dtype=np.float).flatten()
+            self.variances = np.array(self.variances,
+                                      dtype=np.float64).flatten()
             self.clip = True
             self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes)
...
@@ -31,7 +31,7 @@ class TestUpdateLossScalingOp(XPUOpTest):
     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
-        found_inf = np.array([False], dtype=np.bool)
+        found_inf = np.array([False], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)

         self.inputs = {
@@ -75,7 +75,7 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
-        found_inf = np.array([True], dtype=np.bool)
+        found_inf = np.array([True], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
         i = np.random.randint(0, 1024, 1)
         j = np.random.randint(0, 1024, 1)
...
@@ -76,7 +76,7 @@ class _ConvNd(Layer):
                 format(valid_padding_modes, padding_mode))

         if padding_mode in {'reflect', 'replicate', 'circular'
-                            } and not isinstance(padding, np.int):
+                            } and not isinstance(padding, int):
             raise TypeError(
                 "when padding_mode in ['reflect', 'replicate', 'circular'], type of padding must be int"
             )
...
@@ -352,7 +352,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
         data = np.array([data])
     elif isinstance(data, (list, tuple)):
         data = np.array(data)
-        if data.dtype == np.object:
+        if data.dtype == np.object_:
             raise ValueError(
                 "\n\tFaild to convert input data to a regular ndarray :\n\t - Usually "
                 "this means the input data contains nested lists with different lengths. "
...
@@ -430,7 +430,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         reduce_all = True if axis == None or axis == [] or asvector == True else False
         axis = axis if axis != None and axis != [] else [0]
-        reduce_type = 'reduce_max' if porder == np.float(
+        reduce_type = 'reduce_max' if porder == np.float64(
             'inf') else 'reduce_min'
         helper.append_op(type=reduce_type,
                          inputs={'X': out},
...
@@ -146,8 +146,8 @@ def logical_or(x, y, out=None, name=None):
             import paddle
             import numpy as np

-            x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
-            y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
+            x_data = np.array([True, False], dtype=np.bool_).reshape(2, 1)
+            y_data = np.array([True, False, True, False], dtype=np.bool_).reshape(2, 2)
             x = paddle.to_tensor(x_data)
             y = paddle.to_tensor(y_data)
             res = paddle.logical_or(x, y)
@@ -191,8 +191,8 @@ def logical_xor(x, y, out=None, name=None):
             import paddle
             import numpy as np

-            x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
-            y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
+            x_data = np.array([True, False], dtype=np.bool_).reshape([2, 1])
+            y_data = np.array([True, False, True, False], dtype=np.bool_).reshape([2, 2])
             x = paddle.to_tensor(x_data)
             y = paddle.to_tensor(y_data)
             res = paddle.logical_xor(x, y)
...
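
As a quick sanity check on the substitutions (a hedged sketch, not part of the commit), the replacement dtypes resolve to the same array dtypes the removed aliases did:

    import numpy as np

    assert np.array([0]).astype(np.bool_).dtype == np.dtype(bool)
    assert np.zeros(3, dtype=np.complex128).dtype == np.dtype(complex)
    assert np.array([1.0], dtype=np.float64).dtype == np.dtype(float)
    assert np.dtype(np.int_) == np.dtype(int)    # platform C long on both sides
    assert np.empty(1, dtype=np.object_).dtype == np.dtype(object)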