Unverified commit 97701612, authored by Zheng-Bicheng, committed by GitHub

[Test Mv] remove npu (#52022)

Parent 8cc48b2e
......@@ -852,10 +852,6 @@ if(WIN32)
DEPS python)
endif()
if(WITH_ASCEND_CL)
add_subdirectory(npu)
endif()
if(WITH_MKLDNN)
add_subdirectory(mkldnn)
endif()
......
......@@ -12,26 +12,164 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import numpy as np
from paddle import enable_static
from paddle.fluid import core

sys.path.append("..")
from op_test import OpTest, OpTestTool, convert_float_to_uint16
from test_pool2d_op import (
TestPool2D_Op_Mixin,
adaptive_end_index,
adaptive_start_index,
max_pool2D_forward_naive,
)
def pool2d_backward_naive(
x,
ksize,
strides,
paddings,
global_pool=0,
ceil_mode=False,
exclusive=True,
adaptive=False,
data_format='NCHW',
pool_type="max",
padding_algorithm="EXPLICIT",
):
# update paddings
def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
padding = []
for input_size, filter_size, stride_size in zip(
input_shape, pool_size, pool_stride
):
out_size = int((input_size + stride_size - 1) / stride_size)
pad_sum = np.max(
((out_size - 1) * stride_size + filter_size - input_size, 0)
)
pad_0 = int(pad_sum / 2)
pad_1 = int(pad_sum - pad_0)
padding.append(pad_0)
padding.append(pad_1)
return padding
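    # Worked example for _get_padding_with_SAME above (illustration only):
    # input_size=5, filter_size=3, stride_size=2 -> out_size = ceil(5/2) = 3,
    # pad_sum = max((3 - 1) * 2 + 3 - 5, 0) = 2, so pad_0 = 1 and pad_1 = 1,
    # i.e. one element of padding on each side of that dimension.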
if isinstance(padding_algorithm, str):
padding_algorithm = padding_algorithm.upper()
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError(
"Unknown Attr(padding_algorithm): '%s'. "
"It can only be 'SAME' or 'VALID'." % str(padding_algorithm)
)
if padding_algorithm == "VALID":
paddings = [0, 0, 0, 0]
if ceil_mode is not False:
raise ValueError(
"When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)"
" must be False. "
"Received ceil_mode: True."
)
elif padding_algorithm == "SAME":
input_data_shape = []
if data_format == "NCHW":
input_data_shape = x.shape[2:4]
elif data_format == "NHWC":
input_data_shape = x.shape[1:3]
paddings = _get_padding_with_SAME(input_data_shape, ksize, strides)
assert len(paddings) == 2 or len(paddings) == 4
is_sys = True if len(paddings) == 2 else False
if data_format == "NHWC":
x = x.transpose([0, 3, 1, 2])
N, C, H, W = x.shape
if global_pool == 1:
ksize = [H, W]
paddings = [0 for _ in range(len(paddings))]
pad_h_up = paddings[0] if is_sys else paddings[0]
pad_h_down = paddings[0] if is_sys else paddings[1]
pad_w_left = paddings[1] if is_sys else paddings[2]
pad_w_right = paddings[1] if is_sys else paddings[3]
if adaptive:
H_out, W_out = ksize
else:
H_out = (
(H - ksize[0] + pad_h_up + pad_h_down + strides[0] - 1)
// strides[0]
+ 1
if ceil_mode
else (H - ksize[0] + pad_h_up + pad_h_down) // strides[0] + 1
)
W_out = (
(W - ksize[1] + pad_w_left + pad_w_right + strides[1] - 1)
// strides[1]
+ 1
if ceil_mode
else (W - ksize[1] + pad_w_left + pad_w_right) // strides[1] + 1
)
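    # Illustration of the output-size formula above (not used below): with
    # H=6, ksize[0]=3, no padding and stride 2, the floor form gives
    # (6 - 3) // 2 + 1 = 2 windows, while ceil_mode gives
    # (6 - 3 + 2 - 1) // 2 + 1 = 3, keeping the final partial window.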
x_grad = np.zeros_like(x)
for i in range(H_out):
if adaptive:
in_h_start = adaptive_start_index(i, H, ksize[0])
in_h_end = adaptive_end_index(i, H, ksize[0])
else:
in_h_start = np.max((i * strides[0] - pad_h_up, 0))
in_h_end = np.min((i * strides[0] + ksize[0] - pad_h_up, H))
for j in range(W_out):
if adaptive:
in_w_start = adaptive_start_index(j, W, ksize[1])
in_w_end = adaptive_end_index(j, W, ksize[1])
else:
in_h_start = i * strides[0] - pad_h_up
in_w_start = j * strides[1] - pad_w_left
in_h_end = i * strides[0] + ksize[0] - pad_h_up
in_w_end = j * strides[1] + ksize[1] - pad_w_left
field_size = (in_h_end - in_h_start) * (in_w_end - in_w_start)
in_h_start = np.max((in_h_start, 0))
in_w_start = np.max((in_w_start, 0))
in_h_end = np.min((in_h_end, H))
in_w_end = np.min((in_w_end, W))
if pool_type == 'avg':
if exclusive or adaptive:
field_size = (in_h_end - in_h_start) * (
in_w_end - in_w_start
)
x_grad[:, :, in_h_start:in_h_end, in_w_start:in_w_end] += (
1 / field_size
)
elif pool_type == 'max':
for n in range(N):
for c in range(C):
idx = np.argmax(
x[
n, c, in_h_start:in_h_end, in_w_start:in_w_end
].flatten()
)
idx_h = idx // (in_w_end - in_w_start)
idx_w = idx % (in_w_end - in_w_start)
x_grad[
n, c, in_h_start + idx_h, in_w_start + idx_w
] += 1
if data_format == "NHWC":
x_grad = x_grad.transpose([0, 2, 3, 1])
return x_grad
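# A minimal usage sketch of pool2d_backward_naive (illustration only, not part
# of the tests): for a 1x1x4x4 input with a 2x2 max pool, stride 2 and no
# padding, each output cell sends a gradient of 1 to the arg-max of its window:
#   x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
#   g = pool2d_backward_naive(x, ksize=[2, 2], strides=[2, 2], paddings=[0, 0],
#                             pool_type="max")
#   # g is 1 at (h, w) positions (1, 1), (1, 3), (3, 1), (3, 3) and 0 elsewhere.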
@OpTestTool.skip_if_not_cpu_bf16()
class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest):
def init_kernel_type(self):
......
file(
GLOB TEST_OPS
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
"test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
if(WITH_ASCEND_CL)
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach()
  # NOTE: NPU `get_float_status` reads the value from a register. During testing
  # it was found that this register can be overwritten by any program on the
  # card. To prevent nan/inf produced elsewhere from interfering with the other
  # unittests, the unittests related to `float_status` must be marked as
  # exclusive.
set_tests_properties(test_amp_check_finite_and_scale_op_npu
PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE")
set_tests_properties(test_flags_check_nan_inf_npu
PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE")
set_tests_properties(test_float_status_op_npu PROPERTIES LABELS
"RUN_TYPE=EXCLUSIVE")
  # Note: the following test cases have running times of more than 120s
set_tests_properties(test_nearest_interp_op_npu PROPERTIES TIMEOUT 200)
set_tests_properties(test_nearest_interp_v2_op_npu PROPERTIES TIMEOUT 200)
set_tests_properties(test_bilinear_interp_v2_op_npu PROPERTIES TIMEOUT 200)
set_tests_properties(test_stack_op_npu PROPERTIES TIMEOUT 300)
set_tests_properties(test_conv2d_transpose_op_npu PROPERTIES TIMEOUT 200)
set_tests_properties(test_conv2d_op_npu PROPERTIES TIMEOUT 300)
set_tests_properties(test_matmulv2_op_npu PROPERTIES TIMEOUT 300)
set_tests_properties(test_elementwise_add_op_npu PROPERTIES TIMEOUT 200)
endif()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import os
import sys
import signal
import time
from contextlib import closing
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_collective_base_npu import TestCollectiveRunnerBase, runtime_main
paddle.enable_static()
class TestCollectiveIdentity(TestCollectiveRunnerBase):
def __init__(self):
self.global_ring_id = 0
def get_model(self, main_prog, startup_program):
ring_id = 0
nranks = 2
with fluid.program_guard(main_prog, startup_program):
tindata = paddle.static.data(
name="tindata", shape=[-1, 10, 1000], dtype='float32'
)
toutdata = main_prog.current_block().create_var(
name="outofgather",
dtype='float32',
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=False,
stop_gradient=False,
)
main_prog.global_block().append_op(
type="c_identity",
inputs={'X': tindata},
outputs={'Out': toutdata},
attrs={'ring_id': ring_id, 'nranks': nranks},
)
return toutdata
if __name__ == "__main__":
runtime_main(TestCollectiveIdentity, "identity", 0)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import os
import sys
sys.path.append("..")
import signal
import time
from contextlib import closing
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_sync_batch_norm_base_npu import (
TestSyncBatchNormRunnerBase,
runtime_main,
)
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
_set_use_system_allocator,
)
from paddle.fluid.tests.unittests.test_sync_batch_norm_op import (
create_or_get_tensor,
)
_set_use_system_allocator(False)
paddle.enable_static()
class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase):
def __init__(self):
self.global_ring_id = 0
self.dtype = np.float32
self.N = 8
self.C = 16
self.H = 32
self.W = 32
self.dshape = [self.N, self.C, self.H, self.W]
self.atol = 1e-3
def get_model(
self,
main,
startup,
place,
layout,
seed,
sync_bn=False,
only_forward=False,
):
"""Build program."""
use_cudnn = False
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
data = paddle.static.data(
name='input',
shape=[-1] + self.dshape,
dtype=self.dtype,
)
conv = paddle.static.nn.conv2d(
input=data,
num_filters=32,
filter_size=1,
param_attr=fluid.ParamAttr(name='conv2d_weight'),
bias_attr=False,
use_cudnn=use_cudnn,
)
bn = paddle.static.nn.batch_norm(
conv,
param_attr=fluid.ParamAttr(name='bn_scale'),
bias_attr=fluid.ParamAttr(name='bn_bias'),
moving_mean_name='bn_moving_mean',
moving_variance_name='bn_moving_variance',
data_layout=layout,
is_test=only_forward,
)
# if self.dtype == np.float16:
# bn = fluid.layers.cast(bn, 'float32')
sigmoid = paddle.nn.functional.sigmoid(bn)
out = paddle.sum(sigmoid)
# if not sync_bn:
# out = out / core.get_npu_device_count()
if not only_forward:
sgd_opt = fluid.optimizer.SGD(learning_rate=0.0)
sgd_opt.backward(out)
return [out, conv, bn]
if __name__ == "__main__":
# print('sync_batch_norm_op_npu.py __main__')
runtime_main(TestSyncBatchNormOpTraining, "identity", 0)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
class TestNPUAbs(OpTest):
def setUp(self):
self.op_type = "abs"
self.set_npu()
self.init_dtype()
np.random.seed(1024)
x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 when computing the numeric gradient,
        # if x is too small, e.g. 0.002, x_neg will be -0.003 and
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # We should avoid this.
x[np.abs(x) < 0.005] = 0.02
out = np.abs(x)
self.inputs = {'X': x}
self.outputs = {'Out': out}
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
# TODO(qili93): numeric_place will use CPUPlace in eager_op_test.py and abs does
# not have a CPU kernel for float16; to be uncommented after numeric_place is fixed
# @unittest.skipIf(not paddle.is_compiled_with_npu(), "core is not compiled with NPU")
# class TestNPUAbsFP16(TestNPUAbs):
# def init_dtype(self):
# self.dtype = np.float16
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
SEED = 2021
class TestAccuracy(OpTest):
def setUp(self):
self.op_type = "accuracy"
self.set_npu()
self.init_dtype()
np.random.seed(SEED)
n = 8192
infer = np.random.random((n, 1)).astype(self.dtype)
indices = np.random.randint(0, 2, (n, 1)).astype('int64')
label = np.random.randint(0, 2, (n, 1)).astype('int64')
self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
num_correct = 0
for rowid in range(n):
for ele in indices[rowid]:
if ele == label[rowid]:
num_correct += 1
break
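        # The loop above counts rows whose label appears among that row's
        # indices; a vectorized equivalent (sketch only, not used by the test):
        #   num_correct = int(np.any(indices == label, axis=1).sum())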
self.outputs = {
'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype),
'Correct': np.array([num_correct]).astype("int32"),
'Total': np.array([n]).astype("int32"),
}
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAccuracy2(TestAccuracy):
def setUp(self):
self.op_type = "accuracy"
self.set_npu()
self.init_dtype()
np.random.seed(SEED)
n = 8192
infer = np.random.random((n, 100)).astype(self.dtype)
indices = np.random.randint(0, 1000, (n, 100)).astype('int64')
label = np.random.randint(0, 1000, (n, 1)).astype('int64')
self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
num_correct = 0
for rowid in range(n):
for ele in indices[rowid]:
if ele == label[rowid]:
num_correct += 1
break
self.outputs = {
'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype),
'Correct': np.array([num_correct]).astype("int32"),
'Total': np.array([n]).astype("int32"),
}
class TestAccuracyType(TestAccuracy):
def setUp(self):
self.op_type = "accuracy"
self.set_npu()
self.init_dtype()
np.random.seed(SEED)
n = 8192
infer = np.random.random((n, 100)).astype(self.dtype)
indices = np.random.randint(0, 1000, (n, 100)).astype('int64')
label = np.random.randint(0, 1000, (n, 1)).astype('int32')
self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
num_correct = 0
for rowid in range(n):
for ele in indices[rowid]:
if ele == label[rowid]:
num_correct += 1
break
self.outputs = {
'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype),
'Correct': np.array([num_correct]).astype("int32"),
'Total': np.array([n]).astype("int32"),
}
class TestAccuracyType2(TestAccuracy):
def setUp(self):
self.op_type = "accuracy"
self.set_npu()
self.init_dtype()
np.random.seed(SEED)
n = 8192
infer = np.random.random((n, 100)).astype(self.dtype)
indices = np.random.randint(0, 1000, (n, 100)).astype('int32')
label = np.random.randint(0, 1000, (n, 1)).astype('int64')
self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
num_correct = 0
for rowid in range(n):
for ele in indices[rowid]:
if ele == label[rowid]:
num_correct += 1
break
self.outputs = {
'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype),
'Correct': np.array([num_correct]).astype("int32"),
'Total': np.array([n]).astype("int32"),
}
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from test_adam_op import adam_step
paddle.enable_static()
SEED = 2021
class TestAdam(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "adam"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
}
self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}
param_out, moment1_out, moment2_out = adam_step(self.inputs, self.attrs)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'ParamOut': param_out,
'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2,
}
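        # adam_step (imported from test_adam_op) is the NumPy reference used to
        # build the expected outputs; it is expected to follow the standard Adam
        # update, roughly:
        #   moment1_out = beta1 * moment1 + (1 - beta1) * grad
        #   moment2_out = beta2 * moment2 + (1 - beta2) * grad**2
        #   lr_t = lr * sqrt(1 - beta2_pow) / (1 - beta1_pow)
        #   param_out = param - lr_t * moment1_out / (sqrt(moment2_out) + epsilon)
        # (the exact epsilon placement is whatever test_adam_op.adam_step defines).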
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestAdamWithEpsilonTensor(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "adam"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
}
self.attrs = {'epsilon': epsilon}
param_out, moment1_out, moment2_out = adam_step(self.inputs, self.attrs)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'ParamOut': param_out,
'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2,
}
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestAdamOpWithSkipUpdate(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "adam"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
"SkipUpdate": np.array([True]).astype("bool"),
}
self.attrs = {'epsilon': epsilon}
self.outputs = {
'Moment1Out': moment1,
'Moment2Out': moment2,
'ParamOut': param,
'Beta1PowOut': self.inputs['Beta1Pow'],
'Beta2PowOut': self.inputs['Beta2Pow'],
}
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestAdamOpWithGlobalBetaPow(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "adam"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
}
attributes = {'epsilon': epsilon}
param_out, moment1_out, moment2_out = adam_step(self.inputs, attributes)
self.attrs = {'use_global_beta_pow': True}
        # With use_global_beta_pow=True, Beta1PowOut and Beta2PowOut are empty.
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'ParamOut': param_out,
'Beta1PowOut': np.array([]),
'Beta2PowOut': np.array([]),
}
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
a_np = np.random.random(size=(32, 32)).astype('float32')
b_np = np.random.random(size=(32, 32)).astype('float32')
label_np = np.random.randint(2, size=(32, 1)).astype('int64')
with paddle.static.program_guard(main_prog, startup_prog):
a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
label = paddle.static.data(
name="label", shape=[32, 1], dtype='int64'
)
sum = paddle.add(a, b)
z = paddle.pow(sum, 2.0)
fc_1 = paddle.static.nn.fc(x=z, size=128)
prediction = paddle.static.nn.fc(x=fc_1, size=2, activation='softmax')
            cost = paddle.nn.functional.cross_entropy(
                input=prediction, label=label, reduction='none', use_softmax=False
            )
loss = paddle.mean(cost)
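            # prediction already has softmax applied (activation='softmax' above),
            # so cross_entropy is called with use_softmax=False; reduction='none'
            # keeps per-sample losses, which paddle.mean then averages.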
adam = fluid.optimizer.Adam(learning_rate=0.01)
adam.minimize(loss)
if run_npu:
place = paddle.NPUPlace(0)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
print("Start run on {}".format(place))
for epoch in range(100):
pred_res, loss_res = exe.run(
main_prog,
feed={"a": a_np, "b": b_np, "label": label_np},
fetch_list=[prediction, loss],
)
if epoch % 10 == 0:
print(
"Epoch {} | Prediction[0]: {}, Loss: {}".format(
epoch, pred_res[0], loss_res
)
)
return pred_res, loss_res
def test_npu(self):
cpu_pred, cpu_loss = self._test(False)
npu_pred, npu_loss = self._test(True)
np.testing.assert_allclose(npu_pred, cpu_pred, rtol=1e-3)
np.testing.assert_allclose(npu_loss, cpu_loss, rtol=1e-3)
class TestNetWithEpsilonTensor(unittest.TestCase):
def _test(
self,
place,
use_tensor=True,
use_fluid_api=True,
use_global_beta_pow=False,
flatten_param_grads=False,
):
paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
SEED = 2021
paddle.seed(SEED)
np.random.seed(SEED)
a_np = np.random.random(size=(2, 2)).astype('float32')
b_np = np.random.random(size=(2, 2)).astype('float32')
label_np = np.random.randint(2, size=(2, 1)).astype('int64')
weight_attr1 = paddle.ParamAttr(
name="weight1",
initializer=paddle.nn.initializer.Constant(value=1.0),
trainable=True,
)
weight_attr2 = paddle.ParamAttr(
name="weight2",
initializer=paddle.nn.initializer.Constant(value=2.0),
trainable=True,
)
clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0)
with paddle.static.program_guard(main_prog, startup_prog):
with paddle.utils.unique_name.guard():
a = paddle.static.data(name="a", shape=[2, 2], dtype='float32')
b = paddle.static.data(name="b", shape=[2, 2], dtype='float32')
label = paddle.static.data(
name="label", shape=[2, 1], dtype='int64'
)
sum = paddle.add(a, b)
z = paddle.pow(sum, 2.0)
fc_1 = paddle.static.nn.fc(x=z, size=2, weight_attr=weight_attr1)
prediction = paddle.static.nn.fc(
x=fc_1, size=2, weight_attr=weight_attr2, activation='softmax'
)
                cost = paddle.nn.functional.cross_entropy(
                    input=prediction, label=label, reduction='none', use_softmax=False
                )
loss = paddle.mean(cost)
beta1_init = 0.9
beta2_init = 0.999
epsilon_init = 1e-8
if use_tensor:
beta1 = paddle.static.create_global_var(
shape=[1],
value=float(beta1_init),
dtype='float32',
persistable=True,
name="beta1",
)
beta2 = paddle.static.create_global_var(
shape=[1],
value=float(beta2_init),
dtype='float32',
persistable=True,
name="beta2",
)
epsilon = paddle.static.create_global_var(
shape=[1],
value=float(epsilon_init),
dtype='float32',
persistable=True,
name="epsilon",
)
if use_fluid_api:
adam = fluid.optimizer.Adam(
learning_rate=0.01,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
use_global_beta_pow=use_global_beta_pow,
flatten_param_grads=flatten_param_grads,
align_size=256,
grad_clip=clip,
)
else:
adam = paddle.optimizer.Adam(
learning_rate=0.01,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
grad_clip=clip,
)
else:
if use_fluid_api:
adam = fluid.optimizer.Adam(
learning_rate=0.01,
beta1=beta1_init,
beta2=beta2_init,
epsilon=epsilon_init,
use_global_beta_pow=use_global_beta_pow,
flatten_param_grads=flatten_param_grads,
align_size=256,
grad_clip=clip,
)
else:
adam = fluid.optimizer.Adam(
learning_rate=0.01,
beta1=beta1_init,
beta2=beta2_init,
epsilon=epsilon_init,
grad_clip=clip,
)
adam.minimize(loss)
scope = fluid.Scope()
with fluid.scope_guard(scope):
exe = paddle.static.Executor(place)
exe.run(startup_prog)
print("Start run on {}".format(place))
for epoch in range(10):
pred_res, loss_res = exe.run(
main_prog,
feed={"a": a_np, "b": b_np, "label": label_np},
fetch_list=[prediction, loss],
)
print(
"Epoch {} | Prediction[0]: {}, Loss: {}".format(
epoch, pred_res[0], loss_res
)
)
paddle.disable_static()
return pred_res, loss_res
def _test_with_place(self, place):
preds = []
losses = []
for use_tensor in [True, False]:
for use_fluid_api in [True, False]:
for use_global_beta_pow in [True, False]:
for flatten_param_grads in [True, False]:
pred, loss = self._test(
place,
use_tensor,
use_fluid_api,
use_global_beta_pow,
flatten_param_grads,
)
preds.append(pred)
losses.append(loss)
for pred in preds:
np.testing.assert_allclose(pred, preds[0])
for loss in losses:
np.testing.assert_allclose(loss, losses[0])
def test_adam_api(self):
        # NOTE(zhiqiu): cpu and gpu have different seeds, so they should be compared separately.
self._test_with_place(paddle.CPUPlace())
if core.is_compiled_with_npu():
self._test_with_place(paddle.NPUPlace(0))
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from test_adam_op import adamw_step
paddle.enable_static()
SEED = 2021
class TestAdamW(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "adamw"
param = np.random.uniform(-1, 1, (105, 102)).astype("float32")
grad = np.random.uniform(-1, 1, (105, 102)).astype("float32")
moment1 = np.random.uniform(-1, 1, (105, 102)).astype("float32")
# The second moment is positive
moment2 = np.random.random((105, 102)).astype("float32")
learning_rate = 0.5
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
}
self.attrs = {
'epsilon': epsilon,
'beta1': beta1,
'beta2': beta2,
"coeff": 0.9,
"with_decay": True,
}
param_out, moment1_out, moment2_out = adamw_step(
self.inputs, self.attrs
)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'ParamOut': param_out,
'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2,
}
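        # adamw_step (imported from test_adam_op) is the NumPy reference for
        # decoupled weight decay: with with_decay=True it is expected to first
        # multiply the parameter by (1 - lr * coeff) and then apply the plain
        # Adam step (the exact formula is whatever adamw_step defines).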
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestAdamOpWithSkipUpdate(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "adamw"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
"SkipUpdate": np.array([True]).astype("bool"),
}
self.attrs = {'epsilon': epsilon, "coeff": 0.02, "with_decay": True}
self.outputs = {
'Moment1Out': moment1,
'Moment2Out': moment2,
'ParamOut': param,
'Beta1PowOut': self.inputs['Beta1Pow'],
'Beta2PowOut': self.inputs['Beta2Pow'],
}
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestAdamOpWithoutDecay(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "adamw"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
"SkipUpdate": np.array([True]).astype("bool"),
}
self.attrs = {'epsilon': epsilon, "coeff": 0.02, "with_decay": False}
self.outputs = {
'Moment1Out': moment1,
'Moment2Out': moment2,
'ParamOut': param,
'Beta1PowOut': self.inputs['Beta1Pow'],
'Beta2PowOut': self.inputs['Beta2Pow'],
}
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestNet(unittest.TestCase):
def _test(self, run_npu=True):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
a_np = np.random.random(size=(32, 32)).astype('float32')
b_np = np.random.random(size=(32, 32)).astype('float32')
label_np = np.random.randint(2, size=(32, 1)).astype('int64')
with paddle.static.program_guard(main_prog, startup_prog):
a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
label = paddle.static.data(
name="label", shape=[32, 1], dtype='int64'
)
sum = paddle.add(a, b)
z = paddle.pow(sum, 2.0)
fc_1 = paddle.static.nn.fc(x=z, size=128)
prediction = paddle.static.nn.fc(x=fc_1, size=2, activation='softmax')
            cost = paddle.nn.functional.cross_entropy(
                input=prediction, label=label, reduction='none', use_softmax=False
            )
loss = paddle.mean(cost)
adam = paddle.optimizer.AdamW(learning_rate=0.01, weight_decay=0.02)
adam.minimize(loss)
if run_npu:
place = paddle.NPUPlace(0)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
print("Start run on {}".format(place))
for epoch in range(100):
pred_res, loss_res = exe.run(
main_prog,
feed={"a": a_np, "b": b_np, "label": label_np},
fetch_list=[prediction, loss],
)
if epoch % 10 == 0:
print(
"Epoch {} | Prediction[0]: {}, Loss: {}".format(
epoch, pred_res[0], loss_res
)
)
return pred_res, loss_res
def test_npu(self):
npu_pred, npu_loss = self._test(True)
cpu_pred, cpu_loss = self._test(False)
np.testing.assert_allclose(npu_pred, cpu_pred, rtol=5e-3)
np.testing.assert_allclose(npu_loss, cpu_loss, rtol=5e-3)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
from eager_op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.static.amp.amp_nn import check_finite_and_unscale
paddle.enable_static()
class TestCheckFiniteAndUnscale(unittest.TestCase):
def get_prog(self):
paddle.enable_static()
main_program = paddle.static.Program()
with program_guard(main_program):
a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
scale = paddle.static.data(name="scale", shape=[1], dtype='float32')
float_status = paddle.static.data(
name="status", shape=[8], dtype='float32'
)
main_program.global_block().append_op(
type="alloc_float_status", outputs={"FloatStatus": float_status}
)
main_program.global_block().append_op(
type="clear_float_status",
inputs={"FloatStatus": float_status},
outputs={"FloatStatusOut": float_status},
)
c = paddle.divide(a, b)
out, found_inf = check_finite_and_unscale(
[c], scale, float_status=float_status
)
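            # alloc_float_status / clear_float_status reset the NPU float-status
            # register before the divide runs; check_finite_and_unscale then reads
            # it (via float_status) to set found_inf and divides the result by
            # `scale`.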
return main_program, out, found_inf, float_status
def run_prog(self, a, b, scale):
main_program, out, found_inf, float_status = self.get_prog()
place = fluid.NPUPlace(0)
exe = fluid.Executor(place)
        out_, found_inf_, float_status_ = exe.run(
main_program,
feed={"a": a, "b": b, "scale": scale},
fetch_list=[out, found_inf, float_status],
)
print(float_status_)
        return out_, found_inf_
def test_contains_nan(self):
a = np.zeros((32, 32)).astype('float32')
b = np.zeros((32, 32)).astype('float32')
scale = np.array([2.0]).astype('float32')
out, found_inf = self.run_prog(a, b, scale)
print(out, found_inf)
self.assertTrue(found_inf[0])
def test_contains_inf(self):
a = np.ones((32, 32)).astype('float32')
b = np.zeros((32, 32)).astype('float32')
scale = np.array([2.0]).astype('float32')
out, found_inf = self.run_prog(a, b, scale)
print(out, found_inf)
self.assertTrue(found_inf[0])
def test_not_contains_nan_inf(self):
a = np.ones((32, 32)).astype('float32')
b = np.ones((32, 32)).astype('float32')
scale = np.array([2.0]).astype('float32')
out, found_inf = self.run_prog(a, b, scale)
print(out, found_inf)
np.testing.assert_allclose(out, (a / b) / scale[0])
self.assertFalse(found_inf[0])
class TestCheckFiniteAndUnscaleClearFloatStatus(unittest.TestCase):
def get_prog(self):
paddle.enable_static()
main_program = paddle.static.Program()
with program_guard(main_program):
a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
scale = paddle.static.data(name="scale", shape=[1], dtype='float32')
float_status = paddle.static.data(
name="status", shape=[8], dtype='float32'
)
main_program.global_block().append_op(
type="alloc_float_status", outputs={"FloatStatus": float_status}
)
main_program.global_block().append_op(
type="clear_float_status",
inputs={"FloatStatus": float_status},
outputs={"FloatStatusOut": float_status},
)
c = paddle.divide(a, b)
out, found_inf = check_finite_and_unscale(
[c], scale, float_status=float_status
)
main_program.global_block().append_op(
type="alloc_float_status", outputs={"FloatStatus": float_status}
)
main_program.global_block().append_op(
type="clear_float_status",
inputs={"FloatStatus": float_status},
outputs={"FloatStatusOut": float_status},
)
d = paddle.add(a, b)
out, found_inf = check_finite_and_unscale(
[d], scale, float_status=float_status
)
return main_program, out, found_inf, float_status
def run_prog(self, a, b, scale):
main_program, out, found_inf, float_status = self.get_prog()
place = fluid.NPUPlace(0)
exe = fluid.Executor(place)
        out_, found_inf_, float_status_ = exe.run(
main_program,
feed={"a": a, "b": b, "scale": scale},
fetch_list=[out, found_inf, float_status],
)
print(float_status_)
        return out_, found_inf_
def test_not_contains_nan_inf(self):
a = np.ones((32, 32)).astype('float32')
b = np.zeros((32, 32)).astype('float32')
scale = np.array([2.0]).astype('float32')
out, found_inf = self.run_prog(a, b, scale)
print(out, found_inf)
np.testing.assert_allclose(out, (a + b) / scale[0])
self.assertFalse(found_inf[0])
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
paddle.enable_static()
class BaseTestCase(OpTest):
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
def setUp(self):
self.set_npu()
self.initTestCase()
self.x = (1000 * np.random.random(self.dims)).astype(self.dtype)
self.inputs = {'X': self.x}
self.attrs = {'axis': self.axis}
self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}
def test_check_output(self):
self.check_output_with_place(self.place)
# test argmax, dtype: float16
class TestArgMaxFloat16Case1(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = -1
class TestArgMaxFloat16Case2(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = 0
class TestArgMaxFloat16Case3(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = 1
class TestArgMaxFloat16Case4(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = 2
class TestArgMaxFloat16Case5(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float16'
self.axis = -1
class TestArgMaxFloat16Case6(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float16'
self.axis = 0
class TestArgMaxFloat16Case7(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float16'
self.axis = 1
class TestArgMaxFloat16Case8(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (1,)
self.dtype = 'float16'
self.axis = 0
class TestArgMaxFloat16Case9(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (2,)
self.dtype = 'float16'
self.axis = 0
class TestArgMaxFloat16Case10(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3,)
self.dtype = 'float16'
self.axis = 0
# test argmax, dtype: float32
class TestArgMaxFloat32Case1(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = -1
class TestArgMaxFloat32Case2(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
class TestArgMaxFloat32Case3(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 1
class TestArgMaxFloat32Case4(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 2
class TestArgMaxFloat32Case5(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = -1
class TestArgMaxFloat32Case6(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = 0
class TestArgMaxFloat32Case7(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = 1
class TestArgMaxFloat32Case8(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (1,)
self.dtype = 'float32'
self.axis = 0
class TestArgMaxFloat32Case9(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (2,)
self.dtype = 'float32'
self.axis = 0
class TestArgMaxFloat32Case10(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3,)
self.dtype = 'float32'
self.axis = 0
class BaseTestComplex1_1(OpTest):
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (4, 5, 6)
self.dtype = 'float32'
self.axis = 2
def setUp(self):
self.set_npu()
self.initTestCase()
self.x = (np.random.random(self.dims)).astype(self.dtype)
self.inputs = {'X': self.x}
self.attrs = {
'axis': self.axis,
'dtype': int(core.VarDesc.VarType.INT32),
}
self.outputs = {
'Out': np.argmax(self.x, axis=self.axis).astype("int32")
}
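        # The 'dtype' attribute selects the dtype of the index output; passing
        # VarDesc.VarType.INT32 asks the kernel for int32 indices, matching the
        # astype("int32") used in the NumPy reference above.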
def test_check_output(self):
self.check_output_with_place(self.place)
class BaseTestComplex1_2(OpTest):
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (4, 5, 6)
self.dtype = 'float16'
self.axis = 2
def setUp(self):
self.set_npu()
self.initTestCase()
self.x = (np.random.random(self.dims)).astype(self.dtype)
self.inputs = {'X': self.x}
self.attrs = {
'axis': self.axis,
'dtype': int(core.VarDesc.VarType.INT32),
}
self.outputs = {
'Out': np.argmax(self.x, axis=self.axis).astype("int32")
}
def test_check_output(self):
self.check_output_with_place(self.place)
class TestArgMaxAPI(unittest.TestCase):
def initTestCase(self):
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
def setUp(self):
self.initTestCase()
self.__class__.use_npu = True
self.place = [paddle.NPUPlace(0)]
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
np.random.seed(2021)
numpy_input = (np.random.random(self.dims)).astype(self.dtype)
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input, axis=self.axis)
paddle_output = paddle.argmax(tensor_input, axis=self.axis)
np.testing.assert_allclose(
numpy_output, paddle_output.numpy(), rtol=1e-05
)
paddle.enable_static()
for place in self.place:
run(place)
class TestArgMaxAPI_2(unittest.TestCase):
def initTestCase(self):
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
self.keep_dims = True
def setUp(self):
self.initTestCase()
self.__class__.use_npu = True
self.place = [paddle.NPUPlace(0)]
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
np.random.seed(2021)
numpy_input = (np.random.random(self.dims)).astype(self.dtype)
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input, axis=self.axis).reshape(
1, 4, 5
)
paddle_output = paddle.argmax(
tensor_input, axis=self.axis, keepdim=self.keep_dims
)
np.testing.assert_allclose(
numpy_output, paddle_output.numpy(), rtol=1e-05
)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
for place in self.place:
run(place)
class TestArgMaxAPI_3(unittest.TestCase):
def initTestCase(self):
self.dims = (1, 9)
self.dtype = 'float32'
def setUp(self):
self.initTestCase()
self.__class__.use_npu = True
self.place = [paddle.NPUPlace(0)]
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
np.random.seed(2021)
numpy_input = (np.random.random(self.dims)).astype(self.dtype)
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input).reshape([1])
paddle_output = paddle.argmax(tensor_input)
np.testing.assert_allclose(
numpy_output, paddle_output.numpy(), rtol=1e-05
)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
for place in self.place:
run(place)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
paddle.enable_static()
class BaseTestCase(OpTest):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = 1
def setUp(self):
self.initTestCase()
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
np.random.seed(2021)
self.x = (np.random.random(self.dims)).astype(self.dtype)
self.inputs = {'X': self.x}
self.attrs = {'axis': self.axis}
if self.op_type == "arg_min":
self.outputs = {'Out': np.argmin(self.x, axis=self.axis)}
else:
self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}
def test_check_output(self):
self.check_output_with_place(self.place)
# test argmin, dtype: float16
class TestArgMinFloat16Case1(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = -1
class TestArgMinFloat16Case2(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = 0
class TestArgMinFloat16Case3(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = 1
class TestArgMinFloat16Case4(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = 2
class TestArgMinFloat16Case5(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4)
self.dtype = 'float16'
self.axis = -1
class TestArgMinFloat16Case6(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4)
self.dtype = 'float16'
self.axis = 0
class TestArgMinFloat16Case7(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4)
self.dtype = 'float16'
self.axis = 1
class TestArgMinFloat16Case8(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (1,)
self.dtype = 'float16'
self.axis = 0
class TestArgMinFloat16Case9(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (2,)
self.dtype = 'float16'
self.axis = 0
class TestArgMinFloat16Case10(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3,)
self.dtype = 'float16'
self.axis = 0
# test argmin, dtype: float32
class TestArgMinFloat32Case1(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = -1
class TestArgMinFloat32Case2(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
class TestArgMinFloat32Case3(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 1
class TestArgMinFloat32Case4(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 2
class TestArgMinFloat32Case5(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = -1
class TestArgMinFloat32Case6(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = 0
class TestArgMinFloat32Case7(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = 1
class TestArgMinFloat32Case8(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (1,)
self.dtype = 'float32'
self.axis = 0
class TestArgMinFloat32Case9(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (2,)
self.dtype = 'float32'
self.axis = 0
class TestArgMinFloat32Case10(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_min'
self.dims = (3,)
self.dtype = 'float32'
self.axis = 0
class TestArgMinAPI(unittest.TestCase):
def initTestCase(self):
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
def setUp(self):
self.initTestCase()
self.__class__.use_npu = True
self.place = [paddle.NPUPlace(0)]
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
np.random.seed(2021)
numpy_input = (np.random.random(self.dims)).astype(self.dtype)
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmin(numpy_input, axis=self.axis)
paddle_output = paddle.argmin(tensor_input, axis=self.axis)
np.testing.assert_allclose(
numpy_output, paddle_output.numpy(), rtol=1e-05
)
paddle.enable_static()
for place in self.place:
run(place)
class TestArgMinAPI_2(unittest.TestCase):
def initTestCase(self):
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
self.keep_dims = True
def setUp(self):
self.initTestCase()
self.__class__.use_npu = True
self.place = [paddle.NPUPlace(0)]
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
np.random.seed(2021)
numpy_input = (np.random.random(self.dims)).astype(self.dtype)
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmin(numpy_input, axis=self.axis).reshape(
1, 4, 5
)
paddle_output = paddle.argmin(
tensor_input, axis=self.axis, keepdim=self.keep_dims
)
np.testing.assert_allclose(
numpy_output, paddle_output.numpy(), rtol=1e-05
)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
for place in self.place:
run(place)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import ParamAttr
from paddle.fluid.framework import Program, grad_var_name
from paddle.fluid.executor import Executor
from paddle.fluid.backward import append_backward
paddle.enable_static()
class TestArgsortOp(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "argsort"
self.place = paddle.NPUPlace(0)
self.init_dtype()
self.init_inputshape()
self.init_axis()
self.init_direction()
self.x = np.random.random(self.input_shape).astype(self.dtype)
self.inputs = {"X": self.x}
self.attrs = {"axis": self.axis, "descending": self.descending}
self.get_output()
self.outputs = {"Out": self.sorted_x, "Indices": self.indices}
def get_output(self):
if self.descending:
self.indices = np.flip(
np.argsort(self.x, kind='heapsort', axis=self.axis), self.axis
)
self.sorted_x = np.flip(
np.sort(self.x, kind='heapsort', axis=self.axis), self.axis
)
else:
self.indices = np.argsort(self.x, kind='heapsort', axis=self.axis)
self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis)
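        # Flipping the ascending argsort/sort along the sorted axis yields the
        # descending order; elements that compare equal simply end up in
        # reversed relative order.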
def set_npu(self):
self.__class__.use_npu = True
self.__class__.no_need_check_grad = True
def init_inputshape(self):
self.input_shape = (2, 2, 2, 3, 3)
def init_dtype(self):
self.dtype = np.float16
def init_axis(self):
self.axis = -1
def test_check_output(self):
self.check_output_with_place(self.place)
def init_direction(self):
self.descending = False
class TestArgsortOpAxis0NPU(TestArgsortOp):
def init_axis(self):
self.axis = 0
class TestArgsortOpAxis1NPU(TestArgsortOp):
def init_axis(self):
self.axis = 1
class TestArgsortOpAxis2NPU(TestArgsortOp):
def init_axis(self):
self.axis = 2
class TestArgsortOpAxisNeg1NPU(TestArgsortOp):
def init_axis(self):
self.axis = -1
class TestArgsortOpAxisNeg2NPU(TestArgsortOp):
def init_axis(self):
self.axis = -2
class TestArgsortOpDescendingAxisNPU(TestArgsortOp):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxis0NPU(TestArgsortOpAxis0NPU):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxis1NPU(TestArgsortOpAxis1NPU):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxis2NPU(TestArgsortOpAxis2NPU):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxisNeg1NPU(TestArgsortOpAxisNeg1NPU):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxisNeg2NPU(TestArgsortOpAxisNeg2NPU):
def init_direction(self):
self.descending = True
# liurui25: argsort on NPU has a bug with fp32: it changes the type from
# fp32 to fp16, so check_output_with_place adds an atol tolerance.
# This test is only used to test the grad.
# Issue: https://gitee.com/ascend/modelzoo/issues/I44I7K
class TestArgsortOpAxis0NPUFP32(TestArgsortOp):
def init_axis(self):
self.axis = 0
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-2)
def set_npu(self):
self.__class__.use_npu = True
def test_check_grad(self):
self.check_grad_with_place(
self.place, ["X"], "Out", max_relative_error=0.03
)
class TestArgsortOpAxis1NPUFP32(TestArgsortOpAxis0NPUFP32):
def init_axis(self):
self.axis = 1
class TestArgsortOpAxis2NPUFP32(TestArgsortOpAxis0NPUFP32):
def init_axis(self):
self.axis = 2
class TestArgsortOpAxisNeg1NPUFP32(TestArgsortOpAxis0NPUFP32):
def init_axis(self):
self.axis = -1
class TestArgsortOpAxisNeg2NPUFP32(TestArgsortOpAxis0NPUFP32):
def init_axis(self):
self.axis = -2
class TestArgsortOpDescendingAxisNPUFP32(TestArgsortOpAxis0NPUFP32):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxis0NPUFP32(TestArgsortOpAxis0NPUFP32):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxis1NPUFP32(TestArgsortOpAxis1NPUFP32):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxis2NPUFP32(TestArgsortOpAxis2NPUFP32):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxisNeg1NPUFP32(TestArgsortOpAxisNeg1NPUFP32):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxisNeg2NPUFP32(TestArgsortOpAxisNeg2NPUFP32):
def init_direction(self):
self.descending = True
# test cases for int64
class TestArgsortOpAxis0NPUINT64(TestArgsortOp):
def setUp(self):
self.set_npu()
self.op_type = "argsort"
self.place = paddle.NPUPlace(0)
self.init_dtype()
self.init_inputshape()
self.init_axis()
self.init_direction()
self.x = np.random.randint(
low=-100, high=100, size=self.input_shape, dtype=self.dtype
).astype(self.dtype)
self.inputs = {"X": self.x}
self.attrs = {"axis": self.axis, "descending": self.descending}
self.get_output()
self.outputs = {"Out": self.sorted_x, "Indices": self.indices}
def init_axis(self):
self.axis = 0
def init_dtype(self):
self.dtype = np.int64
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-2)
def set_npu(self):
self.__class__.use_npu = True
class TestArgsortOpAxis1NPUINT64(TestArgsortOpAxis0NPUINT64):
def init_axis(self):
self.axis = 1
class TestArgsortOpAxis2NPUINT64(TestArgsortOpAxis0NPUINT64):
def init_axis(self):
self.axis = 2
class TestArgsortOpAxisNeg1NPUINT64(TestArgsortOpAxis0NPUINT64):
def init_axis(self):
self.axis = -1
class TestArgsortOpAxisNeg2NPUINT64(TestArgsortOpAxis0NPUINT64):
def init_axis(self):
self.axis = -2
class TestArgsortOpDescendingAxisNPUINT64(TestArgsortOpAxis0NPUINT64):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxis0NPUINT64(TestArgsortOpAxis0NPUINT64):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxis1NPUINT64(TestArgsortOpAxis1NPUINT64):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxis2NPUINT64(TestArgsortOpAxis2NPUINT64):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxisNeg1NPUINT64(TestArgsortOpAxisNeg1NPUINT64):
def init_direction(self):
self.descending = True
class TestArgsortOpDescendingAxisNeg2NPUINT64(TestArgsortOpAxisNeg2NPUINT64):
def init_direction(self):
self.descending = True
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
paddle.enable_static()
SEED = 2021
class TestAssign(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "assign"
self.init_dtype()
x = np.random.random([3, 3]).astype(self.dtype)
self.inputs = {'X': x}
self.attrs = {}
self.outputs = {'Out': x}
def set_npu(self):
self.__class__.use_npu = True
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
import eager_op_test
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
paddle.enable_static()
np.random.seed(2021)
class TestAssignValueNPUOp(eager_op_test.OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "assign_value"
self.inputs = {}
self.attrs = {}
self.init_data()
self.attrs["shape"] = self.value.shape
self.attrs["dtype"] = framework.convert_np_dtype_to_dtype_(
self.value.dtype
)
self.outputs = {"Out": self.value}
def set_npu(self):
self.__class__.use_npu = True
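# Each init_data variant below stores the constant in the *_values attribute
# that matches the value dtype (fp32_values / int32_values / int64_values /
# bool_values).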
def init_data(self):
self.value = np.random.random(size=(2, 5)).astype(np.float32)
self.attrs["fp32_values"] = [float(v) for v in self.value.flat]
def test_forward(self):
self.check_output_with_place(self.place)
class TestAssignValueNPUOp2(TestAssignValueNPUOp):
def init_data(self):
self.value = np.random.random(size=(2, 5)).astype(np.int32)
self.attrs["int32_values"] = [int(v) for v in self.value.flat]
class TestAssignValueNPUOp3(TestAssignValueNPUOp):
def init_data(self):
self.value = np.random.random(size=(2, 5)).astype(np.int64)
self.attrs["int64_values"] = [int(v) for v in self.value.flat]
class TestAssignValueNPUOp4(TestAssignValueNPUOp):
def init_data(self):
self.value = np.random.choice(a=[False, True], size=(2, 5)).astype(
np.bool_
)
self.attrs["bool_values"] = [int(v) for v in self.value.flat]
class TestAssignApi(unittest.TestCase):
def setUp(self):
self.init_dtype()
self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype(
self.dtype
)
self.place = (
fluid.NPUPlace(0)
if fluid.core.is_compiled_with_npu()
else fluid.CPUPlace()
)
def init_dtype(self):
self.dtype = "float32"
def test_assign(self):
main_program = fluid.Program()
with fluid.program_guard(main_program):
x = paddle.tensor.create_tensor(dtype=self.dtype)
paddle.assign(self.value, output=x)
exe = fluid.Executor(self.place)
[fetched_x] = exe.run(main_program, feed={}, fetch_list=[x])
np.testing.assert_allclose(fetched_x, self.value)
self.assertEqual(fetched_x.dtype, self.value.dtype)
class TestAssignApi2(TestAssignApi):
def init_dtype(self):
self.dtype = "int32"
class TestAssignApi3(TestAssignApi):
def init_dtype(self):
self.dtype = "int64"
class TestAssignApi4(TestAssignApi):
def setUp(self):
self.init_dtype()
self.value = np.random.choice(a=[False, True], size=(2, 5)).astype(
np.bool_
)
self.place = (
fluid.NPUPlace(0)
if fluid.core.is_compiled_with_npu()
else fluid.CPUPlace()
)
def init_dtype(self):
self.dtype = "bool"
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
SEED = 1024
class TestAtan(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "atan"
self.place = paddle.NPUPlace(0)
self.dtype = np.float32
np.random.seed(SEED)
self.shape = [11, 17]
x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
out = np.arctan(x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def set_attrs(self):
pass
def set_npu(self):
self.__class__.use_npu = True
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
def test_out_name(self):
with fluid.program_guard(fluid.Program()):
np_x = np.array([0.1])
data = paddle.static.data(name="X", shape=[1])
out = paddle.atan(data, name='Y')
place = paddle.NPUPlace(0)
exe = fluid.Executor(place)
(result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
expected = np.arctan(np_x)
self.assertEqual(result, expected)
def test_dygraph(self):
with fluid.dygraph.guard(paddle.NPUPlace(0)):
np_x = np.array([0.1])
x = fluid.dygraph.to_variable(np_x)
z = paddle.atan(x).numpy()
z_expected = np.arctan(np_x)
self.assertEqual(z, z_expected)
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAtanShape(TestAtan):
def set_attrs(self):
self.shape = [12, 23, 10]
class TestAtanFloat16(TestAtan):
def set_attrs(self):
self.dtype = np.float16
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
paddle.enable_static()
def test_static_layer(
place, input_np, label_np, reduction='mean', weight_np=None
):
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.static.data(
name='input', shape=input_np.shape, dtype='float32'
)
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
if weight_np is not None:
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
bce_loss = paddle.nn.loss.BCELoss(
weight=weight, reduction=reduction
)
else:
bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
res = bce_loss(input, label)
exe = paddle.static.Executor(place)
static_result = exe.run(
prog,
feed={"input": input_np, "label": label_np}
if weight_np is None
else {"input": input_np, "label": label_np, "weight": weight_np},
fetch_list=[res],
)
return static_result[0]
def test_static_functional(
place, input_np, label_np, reduction='mean', weight_np=None
):
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.static.data(
name='input', shape=input_np.shape, dtype='float32'
)
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
if weight_np is not None:
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
res = paddle.nn.functional.binary_cross_entropy(
input, label, weight=weight, reduction=reduction
)
else:
res = paddle.nn.functional.binary_cross_entropy(
input, label, reduction=reduction
)
exe = paddle.static.Executor(place)
static_result = exe.run(
prog,
feed={"input": input_np, "label": label_np}
if weight_np is None
else {"input": input_np, "label": label_np, "weight": weight_np},
fetch_list=[res],
)
return static_result[0]
def test_dygraph_layer(
place, input_np, label_np, reduction='mean', weight_np=None
):
paddle.disable_static(place)
if weight_np is not None:
weight = paddle.to_tensor(weight_np)
bce_loss = paddle.nn.loss.BCELoss(weight=weight, reduction=reduction)
else:
bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
dy_res = bce_loss(paddle.to_tensor(input_np), paddle.to_tensor(label_np))
dy_result = dy_res.numpy()
paddle.enable_static()
return dy_result
def test_dygraph_functional(
place, input_np, label_np, reduction='mean', weight_np=None
):
paddle.disable_static(place)
input = paddle.to_tensor(input_np)
label = paddle.to_tensor(label_np)
if weight_np is not None:
weight = paddle.to_tensor(weight_np)
dy_res = paddle.nn.functional.binary_cross_entropy(
input, label, weight=weight, reduction=reduction
)
else:
dy_res = paddle.nn.functional.binary_cross_entropy(
input, label, reduction=reduction
)
dy_result = dy_res.numpy()
paddle.enable_static()
return dy_result
def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None):
if weight_np is None:
expected = -1 * (
label_np * np.log(input_np)
+ (1.0 - label_np) * np.log(1.0 - input_np)
)
else:
expected = (
-1
* weight_np
* (
label_np * np.log(input_np)
+ (1.0 - label_np) * np.log(1.0 - input_np)
)
)
if reduction == 'mean':
expected = np.mean(expected)
elif reduction == 'sum':
expected = np.sum(expected)
else:
expected = expected
return expected
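# Illustrative numpy sketch (assumed numbers, not used by the tests): for
# p = [0.9, 0.2] and y = [1.0, 0.0], the elementwise loss above is
# -(y*log(p) + (1-y)*log(1-p)) = [-log(0.9), -log(0.8)]; reduction='mean'
# averages these values and reduction='sum' adds them.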
class TestBCELoss(unittest.TestCase):
def test_BCELoss(self):
input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float32)
label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float32)
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_npu():
places.append(fluid.NPUPlace(0))
reductions = ['sum', 'mean', 'none']
for place in places:
for reduction in reductions:
static_result = test_static_layer(
place, input_np, label_np, reduction
)
dy_result = test_dygraph_layer(
place, input_np, label_np, reduction
)
expected = calc_bceloss(input_np, label_np, reduction)
np.testing.assert_allclose(static_result, expected, rtol=1e-6)
np.testing.assert_allclose(static_result, dy_result)
np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static_functional(
place, input_np, label_np, reduction
)
dy_functional = test_dygraph_functional(
place, input_np, label_np, reduction
)
np.testing.assert_allclose(
static_functional, expected, rtol=1e-6
)
np.testing.assert_allclose(static_functional, dy_functional)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCELoss_weight(self):
input_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype(
np.float32
)
label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype(
np.float32
)
weight_np = np.random.random(size=(3, 4, 10)).astype(np.float32)
place = (
fluid.NPUPlace(0)
if fluid.core.is_compiled_with_npu()
else fluid.CPUPlace()
)
for reduction in ['sum', 'mean', 'none']:
static_result = test_static_layer(
place, input_np, label_np, reduction, weight_np=weight_np
)
dy_result = test_dygraph_layer(
place, input_np, label_np, reduction, weight_np=weight_np
)
expected = calc_bceloss(
input_np, label_np, reduction, weight_np=weight_np
)
np.testing.assert_allclose(static_result, expected, rtol=1e-6)
np.testing.assert_allclose(static_result, dy_result, rtol=1e-6)
np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static_functional(
place, input_np, label_np, reduction, weight_np=weight_np
)
dy_functional = test_dygraph_functional(
place, input_np, label_np, reduction, weight_np=weight_np
)
np.testing.assert_allclose(static_functional, expected, rtol=1e-6)
np.testing.assert_allclose(
static_functional, dy_functional, rtol=1e-6
)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCELoss_error(self):
paddle.disable_static(paddle.NPUPlace(0))
self.assertRaises(
ValueError, paddle.nn.loss.BCELoss, reduction="unsupport reduction"
)
input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
label = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
self.assertRaises(
ValueError,
paddle.nn.functional.binary_cross_entropy,
input=input,
label=label,
reduction="unsupport reduction",
)
paddle.enable_static()
def bce_loss(input, label):
return -1 * (label * np.log(input) + (1.0 - label) * np.log(1.0 - input))
class TestBceLossOp(OpTest):
def setUp(self):
self.set_npu()
self.init_test_case()
self.op_type = "bce_loss"
input_np = np.random.uniform(0.1, 0.8, self.shape).astype("float32")
label_np = np.random.randint(0, 2, self.shape).astype("float32")
output_np = bce_loss(input_np, label_np)
self.inputs = {'X': input_np, 'Label': label_np}
self.outputs = {'Out': output_np}
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
def init_test_case(self):
self.shape = [10, 10]
class TestBceLossOpCase1(TestBceLossOp):
def init_test_case(self):
self.shape = [2, 3, 4, 5]
class TestBceLossOpCase2(TestBceLossOp):
def init_test_case(self):
self.shape = [2, 3, 20]
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid.framework import Program, program_guard
class TestBeamSearchDecodeNPUOp(unittest.TestCase):
"""unittest of beam_search_decode npu op"""
def setUp(self):
self.scope = core.Scope()
self.place = paddle.NPUPlace(0)
def append_lod_tensor(self, tensor_array, lod, data):
lod_tensor = core.LoDTensor()
lod_tensor.set_lod(lod)
lod_tensor.set(data, self.place)
tensor_array.append(lod_tensor)
def test_get_set(self):
ids = self.scope.var("ids").get_lod_tensor_array()
scores = self.scope.var("scores").get_lod_tensor_array()
# Construct sample data with 5 steps and 2 source sentences
# beam_size = 2, end_id = 1
# start with start_id
[
self.append_lod_tensor(
array, [[0, 1, 2], [0, 1, 2]], np.array([0, 0], dtype=dtype)
)
for array, dtype in ((ids, "int64"), (scores, "float32"))
]
[
self.append_lod_tensor(
array,
[[0, 1, 2], [0, 2, 4]],
np.array([2, 3, 4, 5], dtype=dtype),
)
for array, dtype in ((ids, "int64"), (scores, "float32"))
]
[
self.append_lod_tensor(
array,
[[0, 2, 4], [0, 2, 2, 4, 4]],
np.array([3, 1, 5, 4], dtype=dtype),
)
for array, dtype in ((ids, "int64"), (scores, "float32"))
]
[
self.append_lod_tensor(
array,
[[0, 2, 4], [0, 1, 2, 3, 4]],
np.array([1, 1, 3, 5], dtype=dtype),
)
for array, dtype in ((ids, "int64"), (scores, "float32"))
]
[
self.append_lod_tensor(
array,
[[0, 2, 4], [0, 0, 0, 2, 2]],
np.array([5, 1], dtype=dtype),
)
for array, dtype in ((ids, "int64"), (scores, "float32"))
]
sentence_ids = self.scope.var("sentence_ids").get_tensor()
sentence_scores = self.scope.var("sentence_scores").get_tensor()
beam_search_decode_op = Operator(
"beam_search_decode",
# inputs
Ids="ids",
Scores="scores",
# outputs
SentenceIds="sentence_ids",
SentenceScores="sentence_scores",
beam_size=2,
end_id=1,
)
beam_search_decode_op.run(self.scope, self.place)
expected_lod = [[0, 2, 4], [0, 4, 7, 12, 17]]
self.assertEqual(sentence_ids.lod(), expected_lod)
self.assertEqual(sentence_scores.lod(), expected_lod)
expected_data = np.array(
[0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64"
)
np.testing.assert_array_equal(np.array(sentence_ids), expected_data)
np.testing.assert_array_equal(np.array(sentence_scores), expected_data)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import sys
sys.path.append("..")
from eager_op_test import OpTest
import unittest
import numpy as np
import paddle.fluid as fluid
paddle.enable_static()
class TestBeamSearchNPUOp(OpTest):
def setUp(self):
self.set_npu()
self.place = paddle.NPUPlace(0)
self.op_type = "beam_search"
self.init_data()
self.inputs = {
'pre_ids': (self.pre_ids, self.lod),
'pre_scores': (self.pre_score, self.lod),
'ids': (self.ids, self.lod),
'scores': (self.score, self.lod),
}
# The `target_lod` attribute is still based on offset
self.attrs = {
'level': 0,
'beam_size': self.beam_size,
'end_id': 0,
'is_accumulated': self.is_accumulated,
}
self.outputs = {
'selected_ids': (self.selected_ids, self.out_lod),
'selected_scores': (self.selected_scores, self.out_lod),
'parent_idx': self.parent_idx,
}
def set_npu(self):
self.__class__.use_npu = True
def init_data(self):
self.beam_size = 2
self.is_accumulated = True
self.pre_ids = np.array([[1], [2], [3], [4]], dtype='int64')
self.ids = np.array(
[[4, 2, 5], [2, 1, 3], [3, 5, 2], [8, 2, 1]], dtype='int64'
)
self.lod = [[2, 2], [1, 1, 1, 1]]
self.out_lod = [[2, 2], [1, 1, 1, 1]]
self.offset_lod = [[0, 2, 4], [0, 1, 2, 3, 4]]
self.score = np.array(
[
[0.5, 0.3, 0.2],
[0.6, 0.3, 0.1],
[0.9, 0.5, 0.1],
[0.7, 0.5, 0.1],
],
dtype='float32',
)
self.pre_score = np.array([[0.1], [0.2], [0.3], [0.4]], dtype='float32')
self.selected_ids = np.array([4, 2, 3, 8])[:, np.newaxis]
self.selected_scores = np.array([0.5, 0.6, 0.9, 0.7])[:, np.newaxis]
self.parent_idx = np.array([0, 1, 2, 3])
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
class TestBeamSearchNPUOp2(TestBeamSearchNPUOp):
def init_data(self):
self.beam_size = 2
self.is_accumulated = True
self.pre_ids = np.array([[1], [2], [3], [4]], dtype='int64')
self.ids = np.array([[4, 2], [7, 3], [3, 5], [8, 1]], dtype='int64')
self.lod = [[2, 2], [1, 1, 1, 1]]
self.out_lod = [[2, 2], [2, 0, 1, 1]]
self.offset_lod = [[0, 2, 4], [0, 2, 2, 3, 4]]
self.score = np.array(
[
[0.6, 0.9],
[0.5, 0.3],
[0.9, 0.5],
[0.1, 0.7],
],
dtype='float32',
)
self.pre_score = np.array([[0.1], [0.2], [0.3], [0.4]], dtype='float32')
self.selected_ids = np.array([4, 2, 3, 1])[:, np.newaxis]
self.selected_scores = np.array([0.6, 0.9, 0.9, 0.7])[:, np.newaxis]
self.parent_idx = np.array([0, 0, 2, 3])
class TestBeamSearchNPUOp3(TestBeamSearchNPUOp):
def init_data(self):
# end_id = 0
self.beam_size = 2
self.is_accumulated = True
self.pre_ids = np.array([[1], [0], [0], [4]], dtype='int64')
self.ids = np.array([[4, 2], [7, 3], [3, 5], [8, 1]], dtype='int64')
self.lod = [[2, 2], [1, 1, 1, 1]]
self.out_lod = [[2, 2], [1, 1, 0, 2]]
self.offset_lod = [[0, 2, 4], [0, 1, 2, 2, 4]]
self.score = np.array(
[
[0.6, 0.9],
[0.5, 0.3],
[0.9, 0.5],
[0.6, 0.7],
],
dtype='float32',
)
self.pre_score = np.array([[0.1], [1.2], [0.5], [0.4]], dtype='float32')
self.selected_ids = np.array([2, 0, 8, 1])[:, np.newaxis]
self.selected_scores = np.array([0.9, 1.2, 0.6, 0.7])[:, np.newaxis]
self.parent_idx = np.array([0, 1, 3, 3])
class TestBeamSearchNPUOp4(TestBeamSearchNPUOp):
def init_data(self):
# is_accumulated = False
self.beam_size = 2
self.is_accumulated = False
self.pre_ids = np.array([[1], [2], [3], [4]], dtype='int64')
self.ids = np.array([[4, 2], [7, 3], [3, 5], [8, 1]], dtype='int64')
self.lod = [[2, 2], [1, 1, 1, 1]]
self.out_lod = [[2, 2], [0, 2, 1, 1]]
self.offset_lod = [[0, 2, 4], [0, 0, 2, 3, 4]]
self.score = np.array(
[
[0.6, 0.9],
[0.5, 0.3],
[0.9, 0.5],
[0.1, 0.7],
],
dtype='float32',
)
self.pre_score = np.array([[0.1], [2.2], [0.3], [0.4]], dtype='float32')
self.selected_ids = np.array([7, 3, 3, 1])[:, np.newaxis]
self.selected_scores = np.array(
[1.50685, 0.996027, 0.194639, 0.043325]
)[:, np.newaxis]
self.parent_idx = np.array([1, 1, 2, 3])
class TestBeamSearchNPUOp5(TestBeamSearchNPUOp):
def init_data(self):
# beam_size = 1
self.beam_size = 1
self.is_accumulated = True
self.pre_ids = np.array([[1], [2], [3], [4]], dtype='int64')
self.ids = np.array([[4, 2], [7, 3], [3, 5], [8, 1]], dtype='int64')
self.lod = [[1, 1, 1, 1], [1, 1, 1, 1]]
self.out_lod = [[1, 1, 1, 1], [1, 1, 1, 1]]
self.offset_lod = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]
self.score = np.array(
[
[0.6, 0.9],
[0.5, 0.3],
[0.9, 0.5],
[0.1, 0.7],
],
dtype='float32',
)
self.pre_score = np.array([[0.1], [0.2], [0.3], [0.4]], dtype='float32')
self.selected_ids = np.array([2, 7, 3, 1])[:, np.newaxis]
self.selected_scores = np.array([0.9, 0.5, 0.9, 0.7])[:, np.newaxis]
self.parent_idx = np.array([0, 1, 2, 3])
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.nn.functional import interpolate
import paddle
from test_bilinear_interp_v2_op import bilinear_interp_np
paddle.enable_static()
class TestBilinearInterpOp(OpTest):
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def setUp(self):
self.set_npu()
self.out_size = None
self.actual_shape = None
self.data_layout = 'NCHW'
self.init_test_case()
self.op_type = "bilinear_interp_v2"
input_np = np.random.random(self.input_shape).astype(self.dtype)
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
scale_h = 0
scale_w = 0
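# `scale` may be a scalar or a [scale_h, scale_w] list; normalize it here
# before computing the output size (the attrs below mirror this handling).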
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
if self.scale > 0.0:
scale_h = scale_w = float(self.scale)
if isinstance(self.scale, list) and len(self.scale) == 1:
scale_w = scale_h = self.scale[0]
elif isinstance(self.scale, list) and len(self.scale) > 1:
scale_w = self.scale[1]
scale_h = self.scale[0]
out_h = int(in_h * scale_h)
out_w = int(in_w * scale_w)
else:
out_h = self.out_h
out_w = self.out_w
output_np = bilinear_interp_np(
input_np,
out_h,
out_w,
scale_w,
scale_h,
self.out_size,
self.actual_shape,
self.align_corners,
self.align_mode,
self.data_layout,
)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.attrs = {
'out_h': self.out_h,
'out_w': self.out_w,
'interp_method': self.interp_method,
'align_corners': self.align_corners,
'align_mode': self.align_mode,
'data_layout': self.data_layout,
}
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
if self.scale > 0.0:
self.scale = [self.scale]
if isinstance(self.scale, list) and len(self.scale) == 1:
self.scale = [self.scale[0], self.scale[0]]
self.attrs['scale'] = self.scale
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(self.place, atol=self.atol)
def test_check_grad(self):
self.__class__.exist_check_grad = True
if self.dtype == 'float16':
return
self.max_relative_error = 0.005
inputs_to_check = ['X']
output_names = ['Out']
no_grad_set = set()
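# Gradients computed on the CPU serve as the reference for the NPU gradients.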
cpu_place = fluid.CPUPlace()
cpu_grads = self._get_gradient(
inputs_to_check, cpu_place, output_names, no_grad_set
)
npu_grads = self._get_gradient(
inputs_to_check, self.place, output_names, no_grad_set
)
self._assert_is_close(
cpu_grads,
npu_grads,
inputs_to_check,
self.max_relative_error,
"Gradient Check between places",
)
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 1.5
self.align_corners = False
self.align_mode = 1
self.dtype = 'float32'
self.atol = 1e-5
class TestBilinearInterpCaseFP16(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.dtype = 'float16'
self.atol = 1e-2
class TestBilinearInterpCase1(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.0
class TestBilinearInterpCase2(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.0
class TestBilinearInterpCase3(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
class TestBilinearInterpCase4(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.0
self.out_size = np.array([2, 2]).astype("int32")
class TestBilinearInterpCase5(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.0
self.out_size = np.array([11, 11]).astype("int32")
class TestBilinearInterpCase6(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.out_size = np.array([65, 33]).astype("int32")
class TestBilinearInterpCase7(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = [2.0, 0.5]
class TestBilinearInterpSame(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 0.0
class TestBilinearInterpActualShape(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.out_size = np.array([66, 40]).astype("int32")
class TestBilinearInterpDataLayout(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [2, 5, 5, 3]
self.out_h = 2
self.out_w = 2
self.scale = 0.0
self.out_size = np.array([3, 3]).astype("int32")
self.data_layout = "NHWC"
class TestBilinearInterpOtherMethod1(TestBilinearInterpOp):
def set_align_mode(self):
self.align_corners = False
self.align_mode = 1
class TestBilinearInterpWithMethod2(TestBilinearInterpOp):
def set_align_mode(self):
self.align_corners = False
self.align_mode = 0
class TestBilinearInterpWithMethod3(TestBilinearInterpOp):
def set_align_mode(self):
self.align_corners = True
self.align_mode = 0
class TestBilinearInterpScale1(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 2.0
class TestBilinearInterpScale2(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 1.0
class TestBilinearInterpZero(TestBilinearInterpOp):
def init_test_case(self):
super().init_test_case()
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 0.2
self.align_mode = 0
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
import math
import paddle
from eager_op_test import OpTest
paddle.enable_static()
np.random.seed(2021)
def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0):
pb_w = p_box[:, 2] - p_box[:, 0] + (not norm)
pb_h = p_box[:, 3] - p_box[:, 1] + (not norm)
pb_x = pb_w * 0.5 + p_box[:, 0]
pb_y = pb_h * 0.5 + p_box[:, 1]
shape = (1, p_box.shape[0]) if axis == 0 else (p_box.shape[0], 1)
pb_w = pb_w.reshape(shape)
pb_h = pb_h.reshape(shape)
pb_x = pb_x.reshape(shape)
pb_y = pb_y.reshape(shape)
if pb_v.ndim == 2:
var_shape = (
(1, pb_v.shape[0], pb_v.shape[1])
if axis == 0
else (pb_v.shape[0], 1, pb_v.shape[1])
)
pb_v = pb_v.reshape(var_shape)
if pb_v.ndim == 1:
tb_x = pb_v[0] * t_box[:, :, 0] * pb_w + pb_x
tb_y = pb_v[1] * t_box[:, :, 1] * pb_h + pb_y
tb_w = np.exp(pb_v[2] * t_box[:, :, 2]) * pb_w
tb_h = np.exp(pb_v[3] * t_box[:, :, 3]) * pb_h
else:
tb_x = pb_v[:, :, 0] * t_box[:, :, 0] * pb_w + pb_x
tb_y = pb_v[:, :, 1] * t_box[:, :, 1] * pb_h + pb_y
tb_w = np.exp(pb_v[:, :, 2] * t_box[:, :, 2]) * pb_w
tb_h = np.exp(pb_v[:, :, 3] * t_box[:, :, 3]) * pb_h
output_box[:, :, 0] = tb_x - tb_w / 2
output_box[:, :, 1] = tb_y - tb_h / 2
output_box[:, :, 2] = tb_x + tb_w / 2 - (not norm)
output_box[:, :, 3] = tb_y + tb_h / 2 - (not norm)
def box_encoder(t_box, p_box, pb_v, output_box, norm):
pb_w = p_box[:, 2] - p_box[:, 0] + (not norm)
pb_h = p_box[:, 3] - p_box[:, 1] + (not norm)
pb_x = pb_w * 0.5 + p_box[:, 0]
pb_y = pb_h * 0.5 + p_box[:, 1]
shape = (1, p_box.shape[0])
pb_w = pb_w.reshape(shape)
pb_h = pb_h.reshape(shape)
pb_x = pb_x.reshape(shape)
pb_y = pb_y.reshape(shape)
if pb_v.ndim == 2:
pb_v = pb_v.reshape(1, pb_v.shape[0], pb_v.shape[1])
tb_x = ((t_box[:, 2] + t_box[:, 0]) / 2).reshape(t_box.shape[0], 1)
tb_y = ((t_box[:, 3] + t_box[:, 1]) / 2).reshape(t_box.shape[0], 1)
tb_w = (t_box[:, 2] - t_box[:, 0]).reshape(t_box.shape[0], 1) + (not norm)
tb_h = (t_box[:, 3] - t_box[:, 1]).reshape(t_box.shape[0], 1) + (not norm)
if pb_v.ndim == 1:
output_box[:, :, 0] = (tb_x - pb_x) / pb_w / pb_v[0]
output_box[:, :, 1] = (tb_y - pb_y) / pb_h / pb_v[1]
output_box[:, :, 2] = np.log(np.fabs(tb_w / pb_w)) / pb_v[2]
output_box[:, :, 3] = np.log(np.fabs(tb_h / pb_h)) / pb_v[3]
else:
output_box[:, :, 0] = (tb_x - pb_x) / pb_w / pb_v[:, :, 0]
output_box[:, :, 1] = (tb_y - pb_y) / pb_h / pb_v[:, :, 1]
output_box[:, :, 2] = np.log(np.fabs(tb_w / pb_w)) / pb_v[:, :, 2]
output_box[:, :, 3] = np.log(np.fabs(tb_h / pb_h)) / pb_v[:, :, 3]
def batch_box_coder(p_box, pb_v, t_box, lod, code_type, norm, axis=0):
n = t_box.shape[0]
m = p_box.shape[0]
if code_type == "decode_center_size":
m = t_box.shape[1]
output_box = np.zeros((n, m, 4), dtype=np.float32)
cur_offset = 0
for i in range(len(lod)):
if code_type == "encode_center_size":
box_encoder(
t_box[cur_offset : (cur_offset + lod[i]), :],
p_box,
pb_v,
output_box[cur_offset : (cur_offset + lod[i]), :, :],
norm,
)
elif code_type == "decode_center_size":
box_decoder(t_box, p_box, pb_v, output_box, norm, axis)
cur_offset += lod[i]
return output_box
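# Illustrative sketch (assumed numbers, not used by the tests): with a single
# prior box whose width/height/center are pb_w, pb_h, pb_x, pb_y and variance
# [0.1, 0.1, 0.2, 0.2], decode_center_size maps a target delta [dx, dy, dw, dh]
# to a box with center (0.1*dx*pb_w + pb_x, 0.1*dy*pb_h + pb_y) and size
# (exp(0.2*dw)*pb_w, exp(0.2*dh)*pb_h), which is then written back as corner
# coordinates; encode_center_size is the inverse mapping.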
@unittest.skipIf(
not paddle.is_compiled_with_npu(), "core is not compiled with NPU"
)
class TestBoxCoderOp(OpTest):
def setUp(self):
self.op_type = "box_coder"
self.set_npu()
self.init_dtype()
self.set_init_config()
self.set_inputs()
self.set_attrs()
self.set_outputs()
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def set_init_config(self):
self.M = 81
self.N = 20
self.code_type = 'decode_center_size'
self.box_normalized = False
self.lod = [[1, 1, 1, 1, 1]]
self.axis = 0
self.use_variance = False
self.without_prior_box_var = False
self.atol = 1e-5
def set_inputs(self):
self.inputs = {}
assert self.code_type in ['decode_center_size', 'encode_center_size']
assert self.axis in [0, 1]
if self.code_type == 'decode_center_size':
assert not self.use_variance or not self.without_prior_box_var
self.prior_box = np.random.random((self.M, 4)).astype(self.dtype)
if self.use_variance:
self.prior_box_var = np.random.random(4).astype(self.dtype)
else:
if self.without_prior_box_var:
self.prior_box_var = np.ones((self.M, 4)).astype(self.dtype)
else:
self.prior_box_var = np.random.random((self.M, 4)).astype(
self.dtype
)
if self.axis == 0:
self.target_box = np.random.random((self.N, self.M, 4)).astype(
self.dtype
)
else:
self.target_box = np.random.random((self.M, self.N, 4)).astype(
self.dtype
)
self.inputs['PriorBox'] = self.prior_box
self.inputs['TargetBox'] = self.target_box
if not self.use_variance and not self.without_prior_box_var:
self.inputs['PriorBoxVar'] = self.prior_box_var
else:
# encode_center_size
self.prior_box = np.random.random((self.M, 4)).astype(self.dtype)
if self.use_variance:
self.prior_box_var = np.random.random(4).astype(self.dtype)
else:
self.prior_box_var = np.random.random((self.M, 4)).astype(
self.dtype
)
self.target_box = np.random.random((self.N, 4)).astype(self.dtype)
self.inputs['PriorBox'] = self.prior_box
# self.inputs['PriorBoxVar'] = self.prior_box_var
self.inputs['TargetBox'] = (self.target_box, self.lod)
if not self.use_variance:
self.inputs['PriorBoxVar'] = self.prior_box_var
def set_attrs(self):
self.attrs = {
'code_type': self.code_type,
'box_normalized': self.box_normalized,
}
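# When use_variance is set, the per-coordinate variance is passed as a
# flattened `variance` attribute instead of the PriorBoxVar input.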
if self.use_variance:
self.attrs['variance'] = self.prior_box_var.astype(
np.float64
).flatten()
if self.axis != 0:
self.attrs['axis'] = self.axis
def set_outputs(self):
output_box = batch_box_coder(
self.prior_box,
self.prior_box_var,
self.target_box,
self.lod[0],
self.code_type,
self.box_normalized,
self.axis,
)
self.outputs = {'OutputBox': output_box.astype(self.dtype)}
def test_check_output(self):
self.check_output_with_place(self.place, atol=self.atol)
class TestBoxCoderOpWithoutBoxVar(TestBoxCoderOp):
def set_init_config(self):
super().set_init_config()
self.without_prior_box_var = True
self.lod = [[0, 1, 2, 3, 4, 5]]
class TestBoxCoderOpWithLoD(TestBoxCoderOp):
def set_init_config(self):
super().set_init_config()
self.M = 20
self.N = 50
self.lod = [[10, 20, 20]]
self.code_type = 'encode_center_size'
self.box_normalized = True
class TestBoxCoderOpWithLoDWithVariance(TestBoxCoderOpWithLoD):
def set_init_config(self):
super().set_init_config()
self.use_variance = True
class TestBoxCoderOpWithAxis(TestBoxCoderOp):
def set_init_config(self):
super().set_init_config()
self.axis = 1
class TestBoxCoderOpWithVariance(TestBoxCoderOp):
def set_init_config(self):
super().set_init_config()
self.use_variance = True
class TestBoxCoderOpFP16(TestBoxCoderOp):
def init_dtype(self):
self.dtype = np.float16
def set_init_config(self):
super().set_init_config()
self.atol = 1e-2
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.c_embedding_op_base import (
TestCEmbeddingCPU,
TestCEmbeddingOpBase,
TestCEmbeddingOpFP32,
)
paddle.enable_static()
TestCEmbeddingCPU()
TestCEmbeddingOpBase()
TestCEmbeddingOpFP32()
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import os
from test_collective_base_npu import TestDistBase
paddle.enable_static()
class TestIdentityOp(TestDistBase):
def _setup_config(self):
pass
def test_identity(self, col_type="identity"):
dist_env = os.environ
self.check_with_place(
"collective_identity_op_npu.py", col_type, need_envs=dist_env
)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
paddle.enable_static()
SEED = 2021
@skip_check_grad_ci(reason="[skip NPU cast grad check] not implemented yet.")
class TestCast1(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "cast"
self.place = paddle.NPUPlace(0)
ipt = np.random.random(size=[10, 10]) + 1
self.inputs = {'X': ipt.astype('float32')}
self.outputs = {'Out': ipt.astype('float16')}
self.attrs = {
'in_dtype': int(core.VarDesc.VarType.FP32),
'out_dtype': int(core.VarDesc.VarType.FP16),
}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
@skip_check_grad_ci(reason="[skip NPU cast grad check] not implemented yet.")
class TestCast2(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "cast"
self.place = paddle.NPUPlace(0)
ipt = np.random.random(size=[10, 10]) + 1
self.inputs = {'X': ipt.astype('float16')}
self.outputs = {'Out': ipt.astype('float32')}
self.attrs = {
'in_dtype': int(core.VarDesc.VarType.FP16),
'out_dtype': int(core.VarDesc.VarType.FP32),
}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
@skip_check_grad_ci(reason="[skip NPU cast grad check] not implemented yet.")
class TestCast3(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "cast"
self.place = paddle.NPUPlace(0)
ipt = np.random.random(size=[10, 10]) + 1
self.inputs = {'X': ipt.astype('int32')}
self.outputs = {'Out': ipt.astype('int32')}
self.attrs = {
'in_dtype': int(core.VarDesc.VarType.INT32),
'out_dtype': int(core.VarDesc.VarType.INT32),
}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import sys
sys.path.append("..")
from eager_op_test import OpTest
paddle.enable_static()
class TestClipByNormOp(OpTest):
def setUp(self):
self.set_npu()
self.max_relative_error = 0.006
self.init_dtype()
self.initTestCase()
input = np.random.random(self.shape).astype(self.dtype)
input[np.abs(input) < self.max_relative_error] = 0.5
self.op_type = "clip_by_norm"
self.inputs = {
'X': input,
}
self.attrs = {}
self.attrs['max_norm'] = self.max_norm
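# Reference result: if the L2 norm of the input exceeds max_norm, the whole
# tensor is rescaled by max_norm / norm; otherwise it is returned unchanged.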
norm = np.sqrt(np.sum(np.square(input)))
if norm > self.max_norm:
output = self.max_norm * input / norm
else:
output = input
self.outputs = {'Out': output}
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def test_check_output(self):
self.check_output_with_place(self.place)
def initTestCase(self):
self.shape = (100,)
self.max_norm = 1.0
def init_dtype(self):
self.dtype = np.float32
class TestCase1(TestClipByNormOp):
def initTestCase(self):
self.shape = (100,)
self.max_norm = 1e20
class TestCase2(TestClipByNormOp):
def initTestCase(self):
self.shape = (16, 16)
self.max_norm = 0.1
class TestCase3(TestClipByNormOp):
def initTestCase(self):
self.shape = (4, 8, 16)
self.max_norm = 1.0
class TestClipByNormOpFp16(TestClipByNormOp):
def init_dtype(self):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
class TestClipByNormOpFp16Case1(TestClipByNormOpFp16):
def initTestCase(self):
self.shape = (100,)
self.max_norm = 1e20
class TestClipByNormOpFp16Case2(TestClipByNormOpFp16):
def initTestCase(self):
self.shape = (16, 16)
self.max_norm = 0.1
class TestClipByNormOpFp16Case3(TestClipByNormOpFp16):
def initTestCase(self):
self.shape = (4, 8, 16)
self.max_norm = 1.0
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import sys
sys.path.append("..")
from eager_op_test import OpTest
class TestClipOp(OpTest):
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def setUp(self):
self.set_npu()
self.max_relative_error = 0.006
self.inputs = {}
self.initTestCase()
self.op_type = "clip"
self.attrs = {}
self.attrs['min'] = self.min
self.attrs['max'] = self.max
if 'Min' in self.inputs:
min_v = self.inputs['Min']
else:
min_v = self.attrs['min']
if 'Max' in self.inputs:
max_v = self.inputs['Max']
else:
max_v = self.attrs['max']
input = np.random.random(self.shape).astype("float32")
input[np.abs(input - min_v) < self.max_relative_error] = 0.5
input[np.abs(input - max_v) < self.max_relative_error] = 0.5
self.inputs['X'] = input
self.outputs = {'Out': np.clip(self.inputs['X'], min_v, max_v)}
def test_check_output(self):
paddle.enable_static()
self.check_output_with_place(self.place)
paddle.disable_static()
def test_check_grad_normal(self):
paddle.enable_static()
self.check_grad_with_place(self.place, ['X'], 'Out')
paddle.disable_static()
def initTestCase(self):
self.shape = (4, 10, 10)
self.max = 0.8
self.min = 0.3
self.inputs['Max'] = np.array([0.8]).astype('float32')
self.inputs['Min'] = np.array([0.1]).astype('float32')
class TestCase1(TestClipOp):
def initTestCase(self):
self.shape = (8, 16, 8)
self.max = 0.7
self.min = 0.0
class TestCase2(TestClipOp):
def initTestCase(self):
self.shape = (8, 16)
self.max = 1.0
self.min = 0.0
class TestCase3(TestClipOp):
def initTestCase(self):
self.shape = (4, 8, 16)
self.max = 0.7
self.min = 0.2
class TestCase4(TestClipOp):
def initTestCase(self):
self.shape = (4, 8, 8)
self.max = 0.7
self.min = 0.2
self.inputs['Max'] = np.array([0.8]).astype('float32')
self.inputs['Min'] = np.array([0.3]).astype('float32')
class TestCase5(TestClipOp):
def initTestCase(self):
self.shape = (4, 8, 16)
self.max = 0.5
self.min = 0.5
class TestClipOpError(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
input_data = np.random.random((2, 4)).astype("float32")
def test_Variable():
paddle.clip(x=input_data, min=-1.0, max=1.0)
self.assertRaises(TypeError, test_Variable)
def test_dtype():
x2 = paddle.static.data(name='x2', shape=[-1, 1], dtype='int32')
paddle.clip(x=x2, min=-1.0, max=1.0)
self.assertRaises(TypeError, test_dtype)
paddle.disable_static()
class TestClipAPI(unittest.TestCase):
def _executed_api(self, x, min=None, max=None):
return paddle.clip(x, min, max)
def test_clip(self):
paddle.enable_static()
data_shape = [1, 9, 9, 4]
data = np.random.random(data_shape).astype('float32')
images = paddle.static.data(name='image', shape=data_shape, dtype='float32')
min = paddle.static.data(name='min', shape=[1], dtype='float32')
max = paddle.static.data(name='max', shape=[1], dtype='float32')
place = (
fluid.NPUPlace(0)
if fluid.core.is_compiled_with_npu()
else fluid.CPUPlace()
)
exe = fluid.Executor(place)
out_1 = self._executed_api(images, min=min, max=max)
out_2 = self._executed_api(images, min=0.2, max=0.9)
out_3 = self._executed_api(images, min=0.3)
out_4 = self._executed_api(images, max=0.7)
out_5 = self._executed_api(images, min=min)
out_6 = self._executed_api(images, max=max)
out_7 = self._executed_api(images, max=-1.0)
out_8 = self._executed_api(images)
res1, res2, res3, res4, res5, res6, res7, res8 = exe.run(
fluid.default_main_program(),
feed={
"image": data,
"min": np.array([0.2]).astype('float32'),
"max": np.array([0.8]).astype('float32'),
},
fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8],
)
np.testing.assert_allclose(res1, data.clip(0.2, 0.8))
np.testing.assert_allclose(res2, data.clip(0.2, 0.9))
np.testing.assert_allclose(res3, data.clip(min=0.3))
np.testing.assert_allclose(res4, data.clip(max=0.7))
np.testing.assert_allclose(res5, data.clip(min=0.2))
np.testing.assert_allclose(res6, data.clip(max=0.8))
np.testing.assert_allclose(res7, data.clip(max=-1))
np.testing.assert_allclose(res8, data)
paddle.disable_static()
def test_clip_dygraph(self):
paddle.disable_static()
place = (
fluid.NPUPlace(0)
if fluid.core.is_compiled_with_npu()
else fluid.CPUPlace()
)
paddle.disable_static(place)
data_shape = [1, 9, 9, 4]
data = np.random.random(data_shape).astype('float32')
images = paddle.to_tensor(data, dtype='float32')
v_min = paddle.to_tensor(np.array([0.2], dtype=np.float32))
v_max = paddle.to_tensor(np.array([0.8], dtype=np.float32))
out_1 = self._executed_api(images, min=0.2, max=0.8)
images = paddle.to_tensor(data, dtype='float32')
out_2 = self._executed_api(images, min=0.2, max=0.9)
images = paddle.to_tensor(data, dtype='float32')
out_3 = self._executed_api(images, min=v_min, max=v_max)
np.testing.assert_allclose(out_1.numpy(), data.clip(0.2, 0.8))
np.testing.assert_allclose(out_2.numpy(), data.clip(0.2, 0.9))
np.testing.assert_allclose(out_3.numpy(), data.clip(0.2, 0.8))
def test_errors(self):
paddle.enable_static()
x1 = paddle.static.data(name='x1', shape=[1], dtype="int16")
x2 = paddle.static.data(name='x2', shape=[1], dtype="int8")
self.assertRaises(TypeError, paddle.clip, x=x1, min=0.2, max=0.8)
self.assertRaises(TypeError, paddle.clip, x=x2, min=0.2, max=0.8)
paddle.disable_static()
class TestInplaceClipAPI(TestClipAPI):
def _executed_api(self, x, min=None, max=None):
return x.clip_(min, max)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
paddle.enable_static()
class TestElementwiseFloorDiv(OpTest):
def setUp(self):
self.op_type = "elementwise_floordiv"
self.set_npu()
self.init_dtype()
self.init_input_output()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y),
}
self.attrs = {}
self.outputs = {'Out': self.out}
def set_npu(self):
self.__class__.use_npu = True
self.place = paddle.NPUPlace(0)
def init_input_output(self):
self.x = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
def init_dtype(self):
self.dtype = "int64"
def test_check_output(self):
self.check_output_with_place(self.place)
class TestElementwiseFloorDiv2(TestElementwiseFloorDiv):
def init_dtype(self):
self.dtype = "int32"
if __name__ == '__main__':
unittest.main()