diff --git a/python/paddle/distributed/fleet/base/fleet_base.py b/python/paddle/distributed/fleet/base/fleet_base.py
index 4e975e74bdb14e741fff5787b51df9fbd7e61f14..60382c26f445153337600718559e6916e49b932b 100755
--- a/python/paddle/distributed/fleet/base/fleet_base.py
+++ b/python/paddle/distributed/fleet/base/fleet_base.py
@@ -1792,8 +1792,8 @@ class Fleet(object):
                 if (param._grad_ivar() is not None) and (param._grad_ivar(
                 ).dtype == core.VarDesc.VarType.FP32)
             ]
-            temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool))
-            temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool))
+            temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool_))
+            temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_))
             if len(param_grads_fp16):
                 _C_ops.check_finite_and_unscale(param_grads_fp16, self._scale,
                                                 param_grads_fp16,
diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py
index eae8f87b01420e9cef7dca7a030a4931041feb81..3ce5dcc31d433230f5fb99396ae15bdaf7e7f2dd 100644
--- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py
+++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py
@@ -197,8 +197,8 @@ def GroupShardedScaler(scaler):
             else:
                 param_grads_fp32.append(param.grad)
 
-        temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool))
-        temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool))
+        temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool_))
+        temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_))
 
         device = "cpu" if optimizer.offload else "gpu"
         dev_id = 0 if device == "cpu" else int(paddle.get_device().split(":")[
diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_utils.py b/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_utils.py
index 6a30276e02ba238a0f4ee838164a5bf9976f7d84..0412458981b671f6e02d55b737b046cacc7120fc 100644
--- a/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_utils.py
+++ b/python/paddle/distributed/fleet/meta_parallel/sharding/sharding_utils.py
@@ -197,8 +197,8 @@ def ShardingScaler(scaler):
             else:
                 param_grads_fp32.append(param.grad)
 
-        temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool))
-        temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool))
+        temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool_))
+        temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_))
 
         device = "cpu" if optimizer.offload else "gpu"
         dev_id = 0 if device == "cpu" else int(paddle.get_device().split(":")[
diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py
index c7a68c6027be47d4d642ff3cb6dd4389191f3072..1382f7fb5bad010ed753d0cf6da34e32511c8282 100644
--- a/python/paddle/fluid/data_feeder.py
+++ b/python/paddle/fluid/data_feeder.py
@@ -48,7 +48,7 @@ def convert_dtype(dtype):
         return _PADDLE_DTYPE_2_NUMPY_DTYPE[dtype]
     elif isinstance(dtype, type):
         if dtype in [
-                np.bool, np.float16, np.uint16, np.float32, np.float64, np.int8,
+                bool, np.float16, np.uint16, np.float32, np.float64, np.int8,
                 np.int16, np.int32, np.int64, np.uint8, np.complex64,
                 np.complex128
         ]:
diff --git a/python/paddle/fluid/dygraph/amp/loss_scaler.py b/python/paddle/fluid/dygraph/amp/loss_scaler.py
index c57290861942b8020f6f55792c445d42a0578c90..36ddc276fda756ac3a26c9a3c8e906c63550a4f6 100644
--- a/python/paddle/fluid/dygraph/amp/loss_scaler.py
+++ b/python/paddle/fluid/dygraph/amp/loss_scaler.py
@@ -128,11 +128,11 @@ class AmpScaler(object):
         self._decr_count = 0
         self._use_dynamic_loss_scaling = use_dynamic_loss_scaling
 
-        self._found_inf = to_variable(np.array([0]).astype(np.bool))
+        self._found_inf = to_variable(np.array([0]).astype(np.bool_))
         self._temp_found_inf_fp16 = to_variable(
-            np.array([0]).astype(np.bool))
+            np.array([0]).astype(np.bool_))
         self._temp_found_inf_fp32 = to_variable(
-            np.array([0]).astype(np.bool))
+            np.array([0]).astype(np.bool_))
         self._scale = to_variable(
             np.array([self._init_loss_scaling]).astype(np.float32))
         self._cache_founf_inf = None
diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 86b0d6560c927251b49d807dd1880867fd4c4bd0..6d365b6dfc6023b30b0c6ef0f17f0f550acce22d 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -480,7 +480,7 @@ def _as_lodtensor(data, place, dtype=None):
         data = np.array([data]).astype(dtype)
     elif isinstance(data, (list, tuple)):
         data = np.array(data)
-        if data.dtype == np.object:
+        if data.dtype == np.object_:
             raise TypeError(
                 "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
                 "this means the input data contains nested lists with different lengths. "
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 314a502a3cbef07768ddac68adbe346c26a41739..75f7dc66924a22362bbc01ea78d05a6af3fcfc44 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1101,7 +1101,7 @@ def convert_np_dtype_to_dtype_(np_dtype):
         return core.VarDesc.VarType.INT16
     elif dtype == np.int64:
         return core.VarDesc.VarType.INT64
-    elif dtype == np.bool:
+    elif dtype == np.bool_:
         return core.VarDesc.VarType.BOOL
     elif dtype == np.uint16:
         # since there is still no support for bfloat16 in NumPy,
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 6e001a555d33758af02b050c9b85bc6f908dd6e1..6e8e4c0419007f4d5a68cce1e099b8a98d6e7d96 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -12679,8 +12679,8 @@ def logical_or(x, y, out=None, name=None):
             import paddle
             import numpy as np
 
-            x_data = np.array([True, False], dtype=np.bool).reshape(2, 1)
-            y_data = np.array([True, False, True, False], dtype=np.bool).reshape(2, 2)
+            x_data = np.array([True, False], dtype=np.bool_).reshape(2, 1)
+            y_data = np.array([True, False, True, False], dtype=np.bool_).reshape(2, 2)
             x = paddle.to_tensor(x_data)
             y = paddle.to_tensor(y_data)
             res = paddle.logical_or(x, y)
@@ -12720,8 +12720,8 @@ def logical_xor(x, y, out=None, name=None):
             import paddle
             import numpy as np
 
-            x_data = np.array([True, False], dtype=np.bool).reshape([2, 1])
-            y_data = np.array([True, False, True, False], dtype=np.bool).reshape([2, 2])
+            x_data = np.array([True, False], dtype=np.bool_).reshape([2, 1])
+            y_data = np.array([True, False, True, False], dtype=np.bool_).reshape([2, 2])
             x = paddle.to_tensor(x_data)
             y = paddle.to_tensor(y_data)
             res = paddle.logical_xor(x, y)
diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py
index 3ea3af9ed1cb5c768dd2268487fc8abd3efc8c44..fca817f53bae7658eee0d11c7c0c1a05797762ce 100644
--- a/python/paddle/fluid/reader.py
+++ b/python/paddle/fluid/reader.py
@@ -144,7 +144,7 @@ class DataLoaderBase(object):
     @classmethod
     def _check_input_array(cls, item):
         arr = np.asarray(item)
-        if arr.dtype == np.object:
+        if arr.dtype == np.object_:
             raise TypeError(
                 "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
                 "this means the input data contains nested lists with different lengths. "
diff --git a/python/paddle/fluid/tests/unittests/fft/test_fft.py b/python/paddle/fluid/tests/unittests/fft/test_fft.py
index 7ee5a04ece496bb5bbb6b1dd8a65bf22c2a47fec..79345948d07a9bb969de0d354d1dbeeda536c7c6 100644
--- a/python/paddle/fluid/tests/unittests/fft/test_fft.py
+++ b/python/paddle/fluid/tests/unittests/fft/test_fft.py
@@ -483,7 +483,7 @@ class TestIrfft2(unittest.TestCase):
 @place(DEVICES)
 @parameterize((TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'), [(
     'test_bool_input',
-    (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(np.bool8),
+    (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)).astype(np.bool_),
     None, -1, 'backward', NotImplementedError), (
         'test_n_nagative',
         np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), -1, -1,
@@ -558,7 +558,7 @@ class TestIrfftException(unittest.TestCase):
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [('test_bool_input',
       (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
-      ).astype(np.bool8), None, (-2, -1), 'backward', NotImplementedError),
+      ).astype(np.bool_), None, (-2, -1), 'backward', NotImplementedError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
       (-2, -1), 'backward', ValueError),
@@ -640,7 +640,7 @@ class TestIrfft2Exception(unittest.TestCase):
     (TEST_CASE_NAME, 'x', 'n', 'axis', 'norm', 'expect_exception'),
     [('test_bool_input',
       (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
-      ).astype(np.bool8), None, (-2, -1), 'backward', NotImplementedError),
+      ).astype(np.bool_), None, (-2, -1), 'backward', NotImplementedError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
       (-2, -1), 'backward', ValueError),
diff --git a/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py b/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
index 4f19cd06a493fc71935ea2d1cdb23f9d80c8ab46..5b4e19aa85eed57c624dbf84bc11b53d091bf18b 100644
--- a/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
+++ b/python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py
@@ -389,7 +389,7 @@ class TestIrfft2(unittest.TestCase):
     [('test_input_dtype', np.random.randn(4, 4, 4), None, -1, 'backward', TypeError),
      ('test_bool_input',
       (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
-      ).astype(np.bool8), None, -1, 'backward', TypeError),
+      ).astype(np.bool_), None, -1, 'backward', TypeError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), -1, -1,
       'backward', ValueError),
@@ -426,7 +426,7 @@ class TestHfftException(unittest.TestCase):
     [('test_input_dtype', np.random.randn(4, 4, 4), None, -1, 'backward', TypeError),
      ('test_bool_input',
       (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
-      ).astype(np.bool8), None, -1, 'backward', TypeError),
+      ).astype(np.bool_), None, -1, 'backward', TypeError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), -1, -1,
       'backward', ValueError),
@@ -464,7 +464,7 @@ class TestIrfftException(unittest.TestCase):
     [('test_input_dtype', np.random.randn(4, 4, 4), None, None, 'backward', TypeError),
      ('test_bool_input',
       (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
-      ).astype(np.bool8), None, (-2, -1), 'backward', TypeError),
+      ).astype(np.bool_), None, (-2, -1), 'backward', TypeError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
       (-2, -1), 'backward', ValueError),
@@ -505,7 +505,7 @@ class TestHfft2Exception(unittest.TestCase):
     [('test_input_dtype', np.random.randn(4, 4, 4), None, None, 'backward', TypeError),
      ('test_bool_input',
       (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
-      ).astype(np.bool8), None, (-2, -1), 'backward', TypeError),
+      ).astype(np.bool_), None, (-2, -1), 'backward', TypeError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
       (-2, -1), 'backward', ValueError),
@@ -546,7 +546,7 @@ class TestIrfft2Exception(unittest.TestCase):
     [('test_input_dtype', np.random.randn(4, 4, 4), None, None, 'backward', TypeError),
      ('test_bool_input',
       (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
-      ).astype(np.bool8), None, (-2, -1), 'backward', TypeError),
+      ).astype(np.bool_), None, (-2, -1), 'backward', TypeError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
       (-2, -1), 'backward', ValueError),
@@ -589,7 +589,7 @@ class TestHfftnException(unittest.TestCase):
       TypeError),
      # ('test_bool_input',
      #  (np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4)
-     #  ).astype(np.bool8), None, (-2, -1), 'backward', ValueError),
+     #  ).astype(np.bool_), None, (-2, -1), 'backward', ValueError),
      ('test_n_nagative',
       np.random.randn(4, 4, 4) + 1j * np.random.randn(4, 4, 4), (-1, -2),
       (-2, -1), 'backward', ValueError),
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py
index 35f4ca17d5eba69c78bfb47955e3604af8fc6854..c4c1f4cd32d538adc09cb2307f2046ff94fb7b28 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_assign_op_ipu.py
@@ -160,7 +160,7 @@ class TestAssignBoolValue(TestBase):
         self.feed_fp32 = {'in_0': data.astype(np.float32)}
         self.feed_fp16 = {'in_0': data.astype(np.float16)}
         data = np.random.choice([True, False], size=(2, 3, 1))
-        self.assign_bool = data.astype(np.bool)
+        self.assign_bool = data.astype(np.bool_)
 
     def _test_base(self, exec_mode):
         scope = paddle.static.Scope()
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py
index 9265d5f7edfbbd120673ce42a7eb571a6afc7af3..8764fc0a8121b45d5265dc05b0734d6dda902d05 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_mul_int8_mkldnn_op.py
@@ -62,7 +62,7 @@ class TestMKLDNNMulOpS8S8(OpTest):
         B_data = np.random.uniform(-127, 127, (5, 20)).astype(np.float32)
 
-        quant_B = np.round(B_data * self.scale_y[0]).astype(np.int)
+        quant_B = np.round(B_data * self.scale_y[0]).astype(np.int_)
         output = np.dot(A_data, quant_B)
 
         scale_output_shift = (self.scale_out) / \
@@ -131,7 +131,7 @@ class TestMKLDNNMulOpS8S8WithFlatten(TestMKLDNNMulOpS8S8):
         A_data_reshape = A_data.reshape(3 * 4, 4 * 3)
         B_data_reshape = B_data.reshape(2 * 6, 1 * 2 * 3)
 
-        quant_B = np.round(B_data_reshape * self.scale_y[0]).astype(np.int)
+        quant_B = np.round(B_data_reshape * self.scale_y[0]).astype(np.int_)
         output = np.dot(A_data_reshape, quant_B)
 
         scale_output_shift = (self.scale_out) / \
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py
index e9b0cafd11495c8403702fb410b42700f52b3d01..d678025ee4f371aef41343a04b79760de2016d33 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_bf16_mkldnn_op.py
@@ -46,7 +46,7 @@ class TestSoftmaxMKLDNNOp(TestSoftmaxOp):
         self.shape = self.get_x_shape()
         self.axis = self.get_axis()
 
-        x = np.random.uniform(0.1, 1, self.shape).astype(np.float)
+        x = np.random.uniform(0.1, 1, self.shape).astype(np.float64)
         out = convert_float_to_uint16(
             np.apply_along_axis(stable_softmax, self.axis, x))
 
diff --git a/python/paddle/fluid/tests/unittests/npu/test_box_coder_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_box_coder_op_npu.py
index 4d4d61ace841e8756043e5dc37dabd0114543944..7f485e5e9b992f1d8dc3e2c9d507b63c6b6a7a8f 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_box_coder_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_box_coder_op_npu.py
@@ -190,7 +190,7 @@ class TestBoxCoderOp(OpTest):
         }
         if self.use_variance:
             self.attrs['variance'] = self.prior_box_var.astype(
-                np.float).flatten()
+                np.float64).flatten()
         if self.axis != 0:
             self.attrs['axis'] = self.axis
 
diff --git a/python/paddle/fluid/tests/unittests/npu/test_density_prior_box_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_density_prior_box_op_npu.py
index a190aa9b6f2be543720ee83422ce261531f2a212..11a8565103f40ef1ff6d9909d7dbcc42c08584ec 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_density_prior_box_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_density_prior_box_op_npu.py
@@ -82,7 +82,7 @@ class TestNpuDensityPriorBoxOp(OpTest):
         self.batch_size = 10
 
         self.variances = [0.1, 0.1, 0.2, 0.2]
-        self.variances = np.array(self.variances, dtype=np.float).flatten()
+        self.variances = np.array(self.variances, dtype=np.float64).flatten()
 
         self.clip = True
         self.num_priors = 0
diff --git a/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py
index 152a454805576a31f8887b95e06090044c578d1a..0aa286edc2ce842af466d7943d0d683e02a5ca72 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_fill_constant_op_npu.py
@@ -138,7 +138,7 @@ class TestFillConstantBool(OpTest):
         self.__class__.use_npu = True
 
     def init_dtype(self):
-        self.dtype = np.BOOL
+        self.dtype = np.bool_
 
     def test_check_output(self):
         self.check_output_with_place(self.place)
diff --git a/python/paddle/fluid/tests/unittests/npu/test_fill_zeros_like_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_fill_zeros_like_op_npu.py
index e00aa6971ebeb9f1d9367e31c2523055dee5430d..5c21358691b83e0649b8d5be67c59cf33e3e514a 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_fill_zeros_like_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_fill_zeros_like_op_npu.py
@@ -45,7 +45,7 @@ class TestFillZerosLikeOp(OpTest):
 
 class TestFillZerosLikeOpBool(TestFillZerosLikeOp):
     def init_dtype(self):
-        self.dtype = np.bool
+        self.dtype = np.bool_
 
 
 class TestFillZerosLikeOpFp16(TestFillZerosLikeOp):
diff --git a/python/paddle/fluid/tests/unittests/npu/test_prior_box_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_prior_box_op_npu.py
index 47b78d308205c58c789ed3ababce662feb8e9c7a..6447c184454d26d8c2c1ddcd29588a3ac30c8901 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_prior_box_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_prior_box_op_npu.py
@@ -94,9 +94,9 @@ class TestNPUPriorBox(OpTest):
         self.set_min_max_aspect_ratios_order()
         self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0]
         self.aspect_ratios = np.array(
-            self.aspect_ratios, dtype=np.float).flatten()
+            self.aspect_ratios, dtype=np.float64).flatten()
         self.variances = [0.1, 0.1, 0.2, 0.2]
-        self.variances = np.array(self.variances, dtype=np.float).flatten()
+        self.variances = np.array(self.variances, dtype=np.float64).flatten()
 
         self.clip = True
         self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes)
diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_max_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_max_op_npu.py
index 68a28ea72e1fc091b04914de19533534962b0885..64b79a9681ddcfcfd0649dc8f1a770bd7a841f84 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_reduce_max_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_max_op_npu.py
@@ -106,7 +106,7 @@ class TestReduceMaxOpWithOutDtype_bool(TestNPUReduceMaxOp):
         }
         self.outputs = {
             'Out':
-            self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype(np.bool)
+            self.inputs['X'].max(axis=tuple(self.attrs['dim'])).astype(np.bool_)
         }
 
 
diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_min_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_min_op_npu.py
index bbf23e1be3e0e861b614d198f51fa8ff39e0b462..fb2b769312a4d3b3b12763b940c03724badfd82d 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_reduce_min_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_min_op_npu.py
@@ -106,7 +106,7 @@ class TestReduceMinOpWithOutDtype_bool(TestNPUReduceMinOp):
         }
         self.outputs = {
             'Out':
-            self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype(np.bool)
+            self.inputs['X'].min(axis=tuple(self.attrs['dim'])).astype(np.bool_)
         }
 
 
diff --git a/python/paddle/fluid/tests/unittests/npu/test_reduce_prod_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_reduce_prod_op_npu.py
index 59f181be5edacb3d609f6ab827439c4c48860220..285a035a8d9b7e0ad1d3c5d01165f4e579609e6c 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_reduce_prod_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_reduce_prod_op_npu.py
@@ -121,8 +121,8 @@ class TestNPUReduceProdWithOutDtype_bool(TestNPUReduceProd):
         self.inputs = {'X': np.random.random((5, 6, 10)).astype(self.dtype)}
         self.attrs = {'dim': [0], 'out_dtype': int(core.VarDesc.VarType.BOOL)}
         self.outputs = {
-            'Out':
-            self.inputs['X'].prod(axis=tuple(self.attrs['dim'])).astype(np.bool)
+            'Out': self.inputs['X'].prod(
+                axis=tuple(self.attrs['dim'])).astype(np.bool_)
         }
 
 
diff --git a/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py
index 80721cbd66a5589b0361fb44e79db3f00339fe0c..26a943e3ab598848bfa8760883733149eac33183 100755
--- a/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_size_op_npu.py
@@ -66,7 +66,7 @@ class TestSizeOp3(TestSizeOp):
 class TestSizeOp4(TestSizeOp):
     def config(self):
         self.shape = [2**10]
-        self.dtype = np.bool
+        self.dtype = np.bool_
 
 
 class TestSizeOp5(TestSizeOp):
diff --git a/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py
index 8239dd4f3fa89d8b496091db52e21b7d020bedd8..29cfbce0bfd4551f6247a220eeab9c6018df2a8f 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_tril_triu_op_npu.py
@@ -193,7 +193,7 @@ class TestNPUTrilTriu_bool(TestNPUTrilTriu):
         self.check_output_with_place(self.place)
 
     def init_dtype(self):
-        self.dtype = np.bool
+        self.dtype = np.bool_
 
     def initTestCase(self):
         self.real_op_type = np.random.choice(['triu', 'tril'])
diff --git a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py
index 18e2db7f6b1d9153891e84ab0e8ed90f185b8b83..1f347bec02998a78407a9c9107e7278e475f1f79 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py
@@ -35,7 +35,7 @@ class TestUpdateLossScalingOpMinLossScalingBad(TestUpdateLossScalingOpBad):
         self.init()
         fluid.core.globals()['FLAGS_min_loss_scaling'] = 1639
 
-        found_inf = np.array([True], dtype=np.bool)
+        found_inf = np.array([True], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
         i = np.random.randint(0, 1024, 1)
         j = np.random.randint(0, 1024, 1)
diff --git a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py
index 1388adf609ff62a48832836e461debe09fc9bdca..aa6ffabcf4ea2d303dd1772c47cab77403f5c8f8 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py
@@ -32,7 +32,7 @@ class TestUpdateLossScalingOp(OpTest):
         self.place = paddle.NPUPlace(0)
         self.init()
 
-        found_inf = np.array([False], dtype=np.bool)
+        found_inf = np.array([False], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
 
         self.inputs = {
@@ -79,7 +79,7 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
         self.place = paddle.NPUPlace(0)
         self.init()
 
-        found_inf = np.array([True], dtype=np.bool)
+        found_inf = np.array([True], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
         i = np.random.randint(0, 1024, 1)
         j = np.random.randint(0, 1024, 1)
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 738ed90b12e658ab02ab9eae86b18f699ff73b88..e5c4a3b2e7c39d7aa54b0394e776a5d612d82ec7 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -439,7 +439,7 @@ class OpTest(unittest.TestCase):
             np.dtype(np.float64), np.dtype(np.float32), np.dtype(np.float16),
             np.dtype(np.int64), np.dtype(np.int32), np.dtype(np.uint16),
             np.dtype(np.int16), np.dtype(np.int8), np.dtype(np.uint8),
-            np.dtype(np.bool)
+            np.dtype(np.bool_)
         ]
         # check the dtype in dtype_list in order, select the first dtype that in dtype_set
        for dtype in dtype_list:
diff --git a/python/paddle/fluid/tests/unittests/test_assign_op.py b/python/paddle/fluid/tests/unittests/test_assign_op.py
index bfe23c621270d7117c92c7ba06991acc1fb74d37..e3d1567ff81bd0d411a93ca111f7670e455a2ee0 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_op.py
@@ -128,7 +128,7 @@ class TestAssignOApi(unittest.TestCase):
 
     def test_assign_NumpyArray(self):
         with fluid.dygraph.guard():
-            array = np.random.random(size=(100, 10)).astype(np.bool)
+            array = np.random.random(size=(100, 10)).astype(np.bool_)
             result1 = paddle.zeros(shape=[3, 3], dtype='float32')
             paddle.assign(array, result1)
             self.assertTrue(np.allclose(result1.numpy(), array))
diff --git a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py
index cc2b1165ec304a63671b48d4702142ea38c9a2c1..a1f401778f3ba665b66da0abc4482612baccb184 100644
--- a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py
@@ -35,7 +35,7 @@ def bipartite_match(distance, match_indices, match_dist):
     match_sorted = sorted(match_pair, key=lambda tup: tup[2], reverse=True)
 
-    row_indices = -1 * np.ones((row, ), dtype=np.int)
+    row_indices = -1 * np.ones((row, ), dtype=np.int_)
 
     idx = 0
     for i, j, dist in match_sorted:
@@ -69,7 +69,7 @@ def batch_bipartite_match(distance, lod, match_type=None, dist_threshold=None):
     """
     n = len(lod)
     m = distance.shape[1]
-    match_indices = -1 * np.ones((n, m), dtype=np.int)
+    match_indices = -1 * np.ones((n, m), dtype=np.int_)
     match_dist = np.zeros((n, m), dtype=np.float32)
     cur_offset = 0
     for i in range(n):
diff --git a/python/paddle/fluid/tests/unittests/test_box_coder_op.py b/python/paddle/fluid/tests/unittests/test_box_coder_op.py
index 220bffebe83925c60af65aa9594ddd8a29c38145..a5e57c96dc52774dcc76d5cd187ab9069a8ba62d 100644
--- a/python/paddle/fluid/tests/unittests/test_box_coder_op.py
+++ b/python/paddle/fluid/tests/unittests/test_box_coder_op.py
@@ -229,7 +229,7 @@ class TestBoxCoderOpWithVariance(OpTest):
         self.attrs = {
             'code_type': 'decode_center_size',
             'box_normalized': False,
-            'variance': prior_box_var.astype(np.float).flatten(),
+            'variance': prior_box_var.astype(np.float64).flatten(),
             'axis': axis
         }
         self.outputs = {'OutputBox': output_box}
diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py
index bd9ec6b663f604404211cd3a0dad32a5ea37e634..56174845dee86b8efc17d6dc93229cc7a11fdbc9 100755
--- a/python/paddle/fluid/tests/unittests/test_compare_op.py
+++ b/python/paddle/fluid/tests/unittests/test_compare_op.py
@@ -233,8 +233,8 @@ def create_paddle_case(op_type, callback):
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
             exe = paddle.static.Executor(self.place)
-            input_x = np.array([True, False, True]).astype(np.bool)
-            input_y = np.array([True, True, False]).astype(np.bool)
+            input_x = np.array([True, False, True]).astype(np.bool_)
+            input_y = np.array([True, True, False]).astype(np.bool_)
             real_result = callback(input_x, input_y)
             res, = exe.run(feed={"x": input_x,
                                  "y": input_y},
@@ -249,8 +249,8 @@ def create_paddle_case(op_type, callback):
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
             exe = paddle.static.Executor(self.place)
-            input_x = np.array([True, False, True]).astype(np.bool)
-            input_y = np.array([True]).astype(np.bool)
+            input_x = np.array([True, False, True]).astype(np.bool_)
+            input_y = np.array([True]).astype(np.bool_)
             real_result = callback(input_x, input_y)
             res, = exe.run(feed={"x": input_x,
                                  "y": input_y},
diff --git a/python/paddle/fluid/tests/unittests/test_density_prior_box_op.py b/python/paddle/fluid/tests/unittests/test_density_prior_box_op.py
index 4b0bc1dcf85fbb384eea09ee286d35ec248aae70..33ffe23f914e11d832f8933b4da42734dad62fbf 100644
--- a/python/paddle/fluid/tests/unittests/test_density_prior_box_op.py
+++ b/python/paddle/fluid/tests/unittests/test_density_prior_box_op.py
@@ -69,7 +69,7 @@ class TestDensityPriorBoxOp(OpTest):
         self.batch_size = 10
 
         self.variances = [0.1, 0.1, 0.2, 0.2]
-        self.variances = np.array(self.variances, dtype=np.float).flatten()
+        self.variances = np.array(self.variances, dtype=np.float64).flatten()
 
         self.clip = True
         self.num_priors = 0
diff --git a/python/paddle/fluid/tests/unittests/test_fuse_gemm_epilogue_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_gemm_epilogue_pass.py
index 7f3180e21d8c63dd3fbc87d58c01f43422a01bcb..4ed8091383e278df975914778bbf3a64ab8227c0 100644
--- a/python/paddle/fluid/tests/unittests/test_fuse_gemm_epilogue_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_fuse_gemm_epilogue_pass.py
@@ -27,7 +27,7 @@ def compare(ref, res, atol, rtol):
     ref = np.array(ref).flatten()
     res = np.array(res).flatten()
 
-    tmp_ref = ref.astype(np.float)
+    tmp_ref = ref.astype(np.float64)
     tol = atol + rtol * abs(tmp_ref)
 
     diff = abs(res - ref)
diff --git a/python/paddle/fluid/tests/unittests/test_generate_mask_labels_op.py b/python/paddle/fluid/tests/unittests/test_generate_mask_labels_op.py
index 1d7ce33ea7ca2c53dc2bb2a7048444c818d4f33f..0244eb7f8213b123354e044164bd123e3791568e 100644
--- a/python/paddle/fluid/tests/unittests/test_generate_mask_labels_op.py
+++ b/python/paddle/fluid/tests/unittests/test_generate_mask_labels_op.py
@@ -79,8 +79,8 @@ def poly2mask(xy, k, h, w):
         u.extend([int(xs + s * t + .5) for t in ts])
 
     k = len(u)
-    x = np.zeros((k), np.int)
-    y = np.zeros((k), np.int)
+    x = np.zeros((k), np.int_)
+    y = np.zeros((k), np.int_)
     m = 0
     for j in six.moves.xrange(1, k):
         if u[j] != u[j - 1]:
@@ -116,7 +116,7 @@ def poly2mask(xy, k, h, w):
             b[m - 1] += a[j]
             j += 1
     mask = decode(b, m)
-    mask = np.array(mask, dtype=np.int).reshape((w, h))
+    mask = np.array(mask, dtype=np.int_).reshape((w, h))
     mask = mask.transpose((1, 0))
     return mask
 
diff --git a/python/paddle/fluid/tests/unittests/test_kron_op.py b/python/paddle/fluid/tests/unittests/test_kron_op.py
index f4d013b7c6a3eacd24cd3c0e8a10e51902aa65e6..d3c4a520e83288de0e8c415a7355c095bb037a24 100644
--- a/python/paddle/fluid/tests/unittests/test_kron_op.py
+++ b/python/paddle/fluid/tests/unittests/test_kron_op.py
@@ -146,7 +146,7 @@ class TestComplexKronOp(OpTest):
         self.grad_y = self.get_grad_y_by_numpy()
 
     def get_grad_x_by_numpy(self):
-        grad_x = np.zeros(self.x_shape, np.complex)
+        grad_x = np.zeros(self.x_shape, np.complex128)
         for x_i in range(self.x_shape[0]):
             for x_j in range(self.x_shape[1]):
                 for i in range(self.y_shape[0]):
@@ -158,7 +158,7 @@ class TestComplexKronOp(OpTest):
         return grad_x
 
     def get_grad_y_by_numpy(self):
-        grad_y = np.zeros(self.y_shape, np.complex)
+        grad_y = np.zeros(self.y_shape, np.complex128)
         for y_i in range(self.y_shape[0]):
             for y_j in range(self.y_shape[1]):
                 for x_i in range(self.x_shape[0]):
diff --git a/python/paddle/fluid/tests/unittests/test_ones_like.py b/python/paddle/fluid/tests/unittests/test_ones_like.py
index db7fc9d2b2e997d434407380a392aa8dc3a7b7a8..f52fcee537ad3094b4746926221a8166b7561cb5 100644
--- a/python/paddle/fluid/tests/unittests/test_ones_like.py
+++ b/python/paddle/fluid/tests/unittests/test_ones_like.py
@@ -39,7 +39,7 @@ class TestOnesLikeAPI(unittest.TestCase):
 
             # 'bool', 'float32', 'float64', 'int32', 'int64'
             out1 = ones_like(x)
-            out2 = ones_like(x, np.bool)
+            out2 = ones_like(x, np.bool_)
             out3 = ones_like(x, 'float64')
             out4 = ones_like(x, 'int32')
             out5 = ones_like(x, 'int64')
@@ -52,7 +52,7 @@ class TestOnesLikeAPI(unittest.TestCase):
                        fetch_list=[out1, out2, out3, out4, out5])
 
         for i, dtype in enumerate(
-                [np.float32, np.bool, np.float64, np.int32, np.int64]):
+                [np.float32, np.bool_, np.float64, np.int32, np.int64]):
             self.assertEqual(outs[i].dtype, dtype)
             self.assertEqual((outs[i] == np.ones(shape, dtype)).all(), True)
 
@@ -64,7 +64,7 @@ class TestOnesLikeImpeartive(unittest.TestCase):
         ) else fluid.CPUPlace()
         paddle.disable_static(place)
         x = paddle.to_tensor(np.ones(shape))
-        for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
+        for dtype in [np.bool_, np.float32, np.float64, np.int32, np.int64]:
             out = ones_like(x, dtype)
             self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(),
                              True)
diff --git a/python/paddle/fluid/tests/unittests/test_prior_box_op.py b/python/paddle/fluid/tests/unittests/test_prior_box_op.py
index 7381b74af71051f8b993ba6d116b5282dd9b84e1..463a0c7f6386303656f37c9cdf4900e3b03867d5 100644
--- a/python/paddle/fluid/tests/unittests/test_prior_box_op.py
+++ b/python/paddle/fluid/tests/unittests/test_prior_box_op.py
@@ -80,9 +80,9 @@ class TestPriorBoxOp(OpTest):
         self.set_min_max_aspect_ratios_order()
         self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0]
         self.aspect_ratios = np.array(
-            self.aspect_ratios, dtype=np.float).flatten()
+            self.aspect_ratios, dtype=np.float64).flatten()
         self.variances = [0.1, 0.1, 0.2, 0.2]
-        self.variances = np.array(self.variances, dtype=np.float).flatten()
+        self.variances = np.array(self.variances, dtype=np.float64).flatten()
 
         self.clip = True
         self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes)
diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py
index 01d386724d1613b6523a322df97b8dcbc1cdf42a..ad1a8c455064898a11d881402e2da754a2e05d85 100644
--- a/python/paddle/fluid/tests/unittests/test_reduce_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py
@@ -905,7 +905,7 @@ class TestAllAPI(unittest.TestCase):
         paddle.disable_static()
         for place in self.places:
             with fluid.dygraph.guard(place):
-                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
+                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                 x = fluid.layers.assign(np_x)
                 x = fluid.layers.cast(x, 'bool')
 
@@ -960,7 +960,7 @@ class TestAnyAPI(unittest.TestCase):
         paddle.disable_static()
         for place in self.places:
             with fluid.dygraph.guard(place):
-                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool)
+                np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_)
                 x = fluid.layers.assign(np_x)
                 x = fluid.layers.cast(x, 'bool')
 
diff --git a/python/paddle/fluid/tests/unittests/test_signal.py b/python/paddle/fluid/tests/unittests/test_signal.py
index ecbbd8f52db9b5c5ca6f19ca000d7d15a2fc65fc..0dad76b784e00df4e79d8eb7de0ba419e4fb225d 100644
--- a/python/paddle/fluid/tests/unittests/test_signal.py
+++ b/python/paddle/fluid/tests/unittests/test_signal.py
@@ -81,7 +81,7 @@ def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None):
         raise Exception("Input must be finite")
 
     # All norms only depend on magnitude, let's do that first
-    mag = np.abs(S).astype(np.float)
+    mag = np.abs(S).astype(np.float64)
 
     # For max/min norms, filling with 1 works
     fill_norm = 1
diff --git a/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py b/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py
index 56f49f60bde847c2345cb1d003e4ee949c43dce4..dcc382b7485c9f5c5a63a7cd9d698336cfd1a598 100644
--- a/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py
+++ b/python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py
@@ -23,7 +23,7 @@ class TestUpdateLossScalingOp(OpTest):
     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
-        found_inf = np.array([False], dtype=np.bool)
+        found_inf = np.array([False], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
 
         self.inputs = {
@@ -64,7 +64,7 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
-        found_inf = np.array([True], dtype=np.bool)
+        found_inf = np.array([True], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
         i = np.random.randint(0, 1024, 1)
         j = np.random.randint(0, 1024, 1)
diff --git a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
index 80b4db793ff439d6858c6e74db869ac75bd5f23c..1e0aaf93d6576e5a112b51afd64b4726251ab8f1 100644
--- a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
@@ -41,7 +41,7 @@ class TestZerosLikeAPI(unittest.TestCase):
         with program_guard(train_program, startup_program):
             x = paddle.fluid.data('X', shape)
             out1 = zeros_like(x)
-            out2 = zeros_like(x, np.bool)
+            out2 = zeros_like(x, np.bool_)
             out3 = zeros_like(x, 'float64')
             out4 = zeros_like(x, 'int32')
             out5 = zeros_like(x, 'int64')
@@ -52,7 +52,7 @@ class TestZerosLikeAPI(unittest.TestCase):
                        feed={'X': np.ones(shape).astype('float32')},
                        fetch_list=[out1, out2, out3, out4, out5])
         for (i, dtype) in enumerate(
-                [np.float32, np.bool, np.float64, np.int32, np.int64]):
+                [np.float32, np.bool_, np.float64, np.int32, np.int64]):
             self.assertEqual(outs[i].dtype, dtype)
             self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True)
 
@@ -68,7 +68,7 @@ class TestZerosLikeImpeartive(unittest.TestCase):
                  if core.is_compiled_with_cuda() else fluid.CPUPlace())
         paddle.disable_static(place)
         x = paddle.to_tensor(np.ones(shape))
-        for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
+        for dtype in [np.bool_, np.float32, np.float64, np.int32, np.int64]:
             out = zeros_like(x, dtype)
             self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
                              True)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_compare_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_compare_op_xpu.py
index 5496c53a420b94f91a51d0c714c24cbf060d2dc6..0c1ae59d837d162af66c8ad90d5a8ad419d17b73 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_compare_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_compare_op_xpu.py
@@ -223,8 +223,8 @@ def create_paddle_case(op_type, callback):
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
             exe = paddle.static.Executor(self.place)
-            input_x = np.array([True, False, True]).astype(np.bool)
-            input_y = np.array([True, True, False]).astype(np.bool)
+            input_x = np.array([True, False, True]).astype(np.bool_)
+            input_y = np.array([True, True, False]).astype(np.bool_)
             real_result = callback(input_x, input_y)
             res, = exe.run(feed={"x": input_x,
                                  "y": input_y},
@@ -239,8 +239,8 @@ def create_paddle_case(op_type, callback):
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
             exe = paddle.static.Executor(self.place)
-            input_x = np.array([True, False, True]).astype(np.bool)
-            input_y = np.array([True]).astype(np.bool)
+            input_x = np.array([True, False, True]).astype(np.bool_)
+            input_y = np.array([True]).astype(np.bool_)
             real_result = callback(input_x, input_y)
             res, = exe.run(feed={"x": input_x,
                                  "y": input_y},
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_prior_box_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_prior_box_op_xpu.py
index 0830237d5a89d8397db129421158f143c79582fc..d3ebb87d309b24f9f23996956b25bef4f6ecaf84 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_prior_box_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_prior_box_op_xpu.py
@@ -95,9 +95,10 @@ class XPUTestPriorBoxOp(XPUOpTestWrapper):
             self.set_min_max_aspect_ratios_order()
             self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0]
             self.aspect_ratios = np.array(
-                self.aspect_ratios, dtype=np.float).flatten()
+                self.aspect_ratios, dtype=np.float64).flatten()
             self.variances = [0.1, 0.1, 0.2, 0.2]
-            self.variances = np.array(self.variances, dtype=np.float).flatten()
+            self.variances = np.array(
+                self.variances, dtype=np.float64).flatten()
 
             self.clip = True
             self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes)
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py
index 33b13081b54420841a521afd7573c0cb8788ecb6..391763b93add1402ff605fcb6bd251f03e14e65f 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py
@@ -29,7 +29,7 @@ class TestUpdateLossScalingOp(XPUOpTest):
     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
-        found_inf = np.array([False], dtype=np.bool)
+        found_inf = np.array([False], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
 
         self.inputs = {
@@ -72,7 +72,7 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
-        found_inf = np.array([True], dtype=np.bool)
+        found_inf = np.array([True], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
         i = np.random.randint(0, 1024, 1)
         j = np.random.randint(0, 1024, 1)
diff --git a/python/paddle/nn/layer/conv.py b/python/paddle/nn/layer/conv.py
index bb1cbbfc03e550f0d57c7dc30aecfa6e12ce2f75..b7f002081fe9aae144346a1397657350ca78d1aa 100644
--- a/python/paddle/nn/layer/conv.py
+++ b/python/paddle/nn/layer/conv.py
@@ -75,7 +75,7 @@ class _ConvNd(Layer):
                     format(valid_padding_modes, padding_mode))
 
         if padding_mode in {'reflect', 'replicate', 'circular'
-                            } and not isinstance(padding, np.int):
+                            } and not isinstance(padding, int):
             raise TypeError(
                 "when padding_mode in ['reflect', 'replicate', 'circular'], type of padding must be int"
             )
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 5c561060564ec1676ec3b8161fcc7c989a06940e..b7f647f4263a3713f4b57ff710fa881138cbe5a7 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -122,7 +122,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
         data = np.array([data])
     elif isinstance(data, (list, tuple)):
         data = np.array(data)
-        if data.dtype == np.object:
+        if data.dtype == np.object_:
             raise ValueError(
                 "\n\tFaild to convert input data to a regular ndarray :\n\t - Usually "
                 "this means the input data contains nested lists with different lengths. "
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 1be53407688115ee47e0e0d7f6c066208933efc1..edee0d10c327371688b0b39fa9fe55a2ee5cd47e 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -348,7 +348,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         reduce_all = True if axis == None or axis == [] or asvector == True else False
         axis = axis if axis != None and axis != [] else [0]
 
-        reduce_type = 'reduce_max' if porder == np.float(
+        reduce_type = 'reduce_max' if porder == np.float64(
             'inf') else 'reduce_min'
         helper.append_op(
             type=reduce_type,
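--
Editor's note, not part of the patch itself: this diff mechanically replaces the plain
NumPy aliases that NumPy 1.20 deprecated and NumPy 1.24 removed. Where an alias was used
as a Python type (the isinstance check in conv.py, the type list in data_feeder.py), the
builtin bool/int is the drop-in replacement; where it named a dtype, the explicit NumPy
scalar type is used instead. The alias np.bool8 and the misspelled np.BOOL (which NumPy
never defined, so that test line could only raise AttributeError) are normalized to
np.bool_ as well. A minimal sketch of the substitutions, with illustrative variable
names only:

    import numpy as np

    # was: np.array([0]).astype(np.bool) -- alias removed in NumPy >= 1.24
    found_inf = np.array([0]).astype(np.bool_)
    assert found_inf.dtype == np.dtype(bool)

    # was: dtype=np.float / np.int / np.complex
    variances = np.array([0.1, 0.1, 0.2, 0.2], dtype=np.float64)  # np.float   -> np.float64
    indices = -1 * np.ones((2, 3), dtype=np.int_)                 # np.int     -> np.int_
    grad = np.zeros((2, 2), np.complex128)                        # np.complex -> np.complex128

np.float and np.complex were aliases of the Python builtins, so np.float64 and
np.complex128 are exact replacements on every platform, while np.int_ remains the
platform C long, matching the behavior of the removed np.int.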