Unverified commit b6ae6a5d authored by jjyaoao, committed by GitHub

[Test Mv] remove remaining tests in unittests/mlu (#52291)

Parent: d4571470
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import os
import sys
sys.path.append("..")
import signal
import time
from contextlib import closing
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_sync_batch_norm_base_mlu import (
TestSyncBatchNormRunnerBase,
runtime_main,
)
from eager_op_test import OpTest, _set_use_system_allocator
from test_sync_batch_norm_op import create_or_get_tensor
_set_use_system_allocator(False)
paddle.enable_static()
class TestSyncBatchNormOpTraining(TestSyncBatchNormRunnerBase):
def __init__(self):
self.global_ring_id = 0
self.dtype = np.float32
self.bn_dtype = np.float32
self.N = 8
self.C = 16
self.H = 32
self.W = 32
self.dshape = [self.N, self.C, self.H, self.W]
self.atol = 1e-3
def get_model(
self,
main,
startup,
place,
layout,
seed,
sync_bn=False,
only_forward=False,
):
"""Build program."""
use_cudnn = False
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
data = paddle.static.data(
name='input',
shape=self.dshape,
dtype=self.dtype,
)
conv = paddle.static.nn.conv2d(
input=data,
num_filters=32,
filter_size=1,
param_attr=fluid.ParamAttr(name='conv2d_weight'),
bias_attr=False,
use_cudnn=use_cudnn,
)
if self.bn_dtype == np.float16:
conv = paddle.cast(conv, 'float16')
bn = paddle.static.nn.batch_norm(
conv,
param_attr=fluid.ParamAttr(name='bn_scale'),
bias_attr=fluid.ParamAttr(name='bn_bias'),
moving_mean_name='bn_moving_mean',
moving_variance_name='bn_moving_variance',
data_layout=layout,
is_test=only_forward,
)
if self.bn_dtype == np.float16:
bn = paddle.cast(bn, 'float32')
sigmoid = paddle.nn.functional.sigmoid(bn)
out = paddle.sum(sigmoid)
# if not sync_bn:
# out = out / core.get_mlu_device_count()
if not only_forward:
sgd_opt = fluid.optimizer.SGD(learning_rate=0.0)
sgd_opt.backward(out)
return [out, conv, bn]
if __name__ == "__main__":
# print('sync_batch_norm_op_mlu.py __main__')
runtime_main(TestSyncBatchNormOpTraining, "identity", 0)
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append('..')
from eager_op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
import paddle.nn.functional as F
paddle.enable_static()
np.random.seed(10)
class TestAbs(OpTest):
def setUp(self):
self.op_type = "abs"
self.set_mlu()
self.dtype = 'float32'
self.shape = [4, 25]
np.random.seed(1024)
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Because we set delta = 0.005 when computing the numeric gradient,
# if x is too small (e.g. 0.002), x_neg will be -0.003 and x_pos will
# be 0.007, so the numeric gradient is inaccurate around the kink of
# abs at zero. We avoid this by pushing such values away from zero.
x[np.abs(x) < 0.005] = 0.02
out = np.abs(x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'], ['Out']
)
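# The setUp comment above explains the delta = 0.005 used by OpTest's numeric
# gradient check. As an illustrative sketch (not OpTest's actual
# implementation), a central-difference gradient of sum(|x|) looks like the
# helper below; when |x| < delta, x + delta and x - delta fall on opposite
# sides of the kink of abs at zero, which is why such inputs are pushed to
# 0.02 above. The helper is for reference only and is not called by the test.
def _numeric_abs_grad_sketch(x, delta=0.005):
    """Reference central-difference gradient of sum(|x|) w.r.t. x (sketch)."""
    x = x.copy()
    grad = np.zeros_like(x)
    for i in range(x.size):
        orig = x.flat[i]
        x.flat[i] = orig + delta
        pos = np.abs(x).sum()
        x.flat[i] = orig - delta
        neg = np.abs(x).sum()
        x.flat[i] = orig
        grad.flat[i] = (pos - neg) / (2.0 * delta)
    return grad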
class TestAbsHalf(OpTest):
def setUp(self):
self.op_type = "abs"
self.set_mlu()
self.dtype = 'float16'
self.shape = [7, 9, 13, 19]
np.random.seed(1024)
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Because we set delta = 0.005 when computing the numeric gradient,
# if x is too small (e.g. 0.002), x_neg will be -0.003 and x_pos will
# be 0.007, so the numeric gradient is inaccurate around the kink of
# abs at zero. We avoid this by pushing such values away from zero.
x[np.abs(x) < 0.005] = 0.02
out = np.abs(x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(
self.place, ['X'], ['Out']
)
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append('..')
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
paddle.enable_static()
class TestAccuracyOp(OpTest):
def setUp(self):
self.op_type = "accuracy"
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
self.dtype = np.float32
self.init_dtype()
n = 8192
infer = np.random.random((n, 1)).astype(self.dtype)
indices = np.random.randint(0, 2, (n, 1)).astype('int32')
label = np.random.randint(0, 2, (n, 1)).astype('int32')
self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
num_correct = 0
for rowid in range(n):
for ele in indices[rowid]:
if ele == label[rowid]:
num_correct += 1
break
self.outputs = {
'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype),
'Correct': np.array([num_correct]).astype("int32"),
'Total': np.array([n]).astype("int32"),
}
def init_dtype(self):
pass
def test_check_output(self):
self.check_output_with_place(self.place)
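# For reference, the row-wise matching loop in setUp can be expressed as a
# single vectorized numpy call. This helper is only an illustrative
# equivalence for readers; it is not used by the accuracy op or the test.
def _count_correct_sketch(indices, label):
    """Count rows whose top-k indices contain the label (sketch)."""
    # A row is correct as soon as any of its indices equals its label.
    return int(np.any(indices == label, axis=1).sum())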
class TestAccuracyOpFp16(TestAccuracyOp):
def init_dtype(self):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
class TestAccuracyOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of accuracy_op must be Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.MLUPlace(0)
)
label = paddle.static.data(
name='label', shape=[-1, 1], dtype="int32"
)
self.assertRaises(TypeError, paddle.static.accuracy, x1, label)
self.assertRaises(TypeError, paddle.metric.accuracy, x1, label)
# The input dtype of accuracy_op must be float32 or float64.
x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="int32")
self.assertRaises(TypeError, paddle.static.accuracy, x2, label)
self.assertRaises(TypeError, paddle.metric.accuracy, x2, label)
x3 = paddle.static.data(name='input', shape=[-1, 2], dtype="float16")
paddle.static.accuracy(input=x3, label=label)
paddle.metric.accuracy(input=x3, label=label)
class TestAccuracyAPI1(unittest.TestCase):
def setUp(self):
self.predictions = paddle.static.data(
shape=[2, 5], name="predictions", dtype="float32"
)
self.label = paddle.static.data(
shape=[2, 1], name="labels", dtype="int32"
)
self.result = paddle.static.accuracy(
input=self.predictions, label=self.label, k=1
)
self.input_predictions = np.array(
[[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
dtype="float32",
)
self.input_labels = np.array([[2], [0]], dtype="int32")
self.expect_value = np.array([0.5], dtype='float32')
def test_api(self):
exe = paddle.static.Executor()
(result,) = exe.run(
feed={
"predictions": self.input_predictions,
'labels': self.input_labels,
},
fetch_list=[self.result.name],
)
self.assertEqual((result == self.expect_value).all(), True)
class TestAccuracyAPI2(unittest.TestCase):
def test_api(self):
with fluid.dygraph.guard():
predictions = paddle.to_tensor(
[[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
dtype='float32',
)
label = paddle.to_tensor([[2], [0]], dtype="int32")
result = paddle.static.accuracy(input=predictions, label=label, k=1)
expect_value = np.array([0.5], dtype='float32')
self.assertEqual((result.numpy() == expect_value).all(), True)
class TestAccuracyAPI(unittest.TestCase):
def test_api(self):
with fluid.dygraph.guard():
predictions = paddle.to_tensor(
[[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]],
dtype='float32',
)
label = paddle.to_tensor([[2], [0]], dtype="int32")
result = paddle.metric.accuracy(input=predictions, label=label, k=1)
expect_value = np.array([0.5], dtype='float32')
self.assertEqual((result.numpy() == expect_value).all(), True)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from test_adam_op import adam_step
paddle.enable_static()
SEED = 2022
class TestAdam(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "adam"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
}
self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}
param_out, moment1_out, moment2_out = adam_step(self.inputs, self.attrs)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'ParamOut': param_out,
'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2,
}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
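# adam_step is imported from test_adam_op and provides the expected outputs.
# As a hedged reference (illustrative names, not the operator's API), the
# textbook Adam update it is expected to reproduce is sketched below; the
# helper is not invoked by the tests.
def _adam_update_sketch(param, grad, moment1, moment2, lr, beta1, beta2,
                        epsilon, beta1_pow, beta2_pow):
    """Plain numpy single-step Adam update (reference sketch)."""
    moment1_out = beta1 * moment1 + (1 - beta1) * grad
    moment2_out = beta2 * moment2 + (1 - beta2) * grad * grad
    # Bias-corrected learning rate for this step.
    lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
    param_out = param - lr_t * moment1_out / (np.sqrt(moment2_out) + epsilon)
    return param_out, moment1_out, moment2_out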
class TestAdamWithEpsilonTensor(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "adam"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
}
self.attrs = {'epsilon': epsilon}
param_out, moment1_out, moment2_out = adam_step(self.inputs, self.attrs)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'ParamOut': param_out,
'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2,
}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestAdamOpWithSkipUpdate(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "adam"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
"SkipUpdate": np.array([True]).astype("bool"),
}
self.attrs = {'epsilon': epsilon}
self.outputs = {
'Moment1Out': moment1,
'Moment2Out': moment2,
'ParamOut': param,
'Beta1PowOut': self.inputs['Beta1Pow'],
'Beta2PowOut': self.inputs['Beta2Pow'],
}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestAdamOpWithGlobalBetaPow(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "adam"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
}
attributes = {'epsilon': epsilon}
param_out, moment1_out, moment2_out = adam_step(self.inputs, attributes)
self.attrs = {'use_global_beta_pow': True}
# When use_global_beta_pow is True, Beta1PowOut and Beta2PowOut are left empty.
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'ParamOut': param_out,
'Beta1PowOut': np.array([]),
'Beta2PowOut': np.array([]),
}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestNet(unittest.TestCase):
def _test(self, run_mlu=True):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
a_np = np.random.random(size=(32, 32)).astype('float32')
b_np = np.random.random(size=(32, 32)).astype('float32')
label_np = np.random.randint(2, size=(32, 1)).astype('int64')
with paddle.static.program_guard(main_prog, startup_prog):
a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
label = paddle.static.data(
name="label", shape=[32, 1], dtype='int64'
)
sum = paddle.add(a, b)
z = paddle.pow(sum, 2.0)
fc_1 = paddle.static.nn.fc(x=z, size=128)
prediction = paddle.static.nn.fc(x=fc_1, size=2, activation='softmax')
cost = paddle.nn.functional.cross_entropy(
input=prediction, label=label, reduction='none', use_softmax=False
)
loss = paddle.mean(cost)
adam = fluid.optimizer.Adam(learning_rate=0.01)
adam.minimize(loss)
if run_mlu:
place = paddle.device.MLUPlace(0)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
print("Start run on {}".format(place))
for epoch in range(100):
pred_res, loss_res = exe.run(
main_prog,
feed={"a": a_np, "b": b_np, "label": label_np},
fetch_list=[prediction, loss],
)
if epoch % 10 == 0:
print(
"Epoch {} | Prediction[0]: {}, Loss: {}".format(
epoch, pred_res[0], loss_res
)
)
return pred_res, loss_res
def test_mlu(self):
mlu_pred, mlu_loss = self._test(True)
cpu_pred, cpu_loss = self._test(False)
np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-3)
np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-3)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from test_adam_op import adamw_step
paddle.enable_static()
SEED = 2022
class TestAdamW(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "adamw"
param = np.random.uniform(-1, 1, (105, 102)).astype("float32")
grad = np.random.uniform(-1, 1, (105, 102)).astype("float32")
moment1 = np.random.uniform(-1, 1, (105, 102)).astype("float32")
# The second moment is positive
moment2 = np.random.random((105, 102)).astype("float32")
learning_rate = 0.5
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
}
self.attrs = {
'epsilon': epsilon,
'beta1': beta1,
'beta2': beta2,
"coeff": 0.9,
"with_decay": True,
}
param_out, moment1_out, moment2_out = adamw_step(
self.inputs, self.attrs
)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'ParamOut': param_out,
'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2,
}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
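# adamw_step also comes from test_adam_op. The decoupled weight decay that
# with_decay=True is expected to model is commonly written as scaling the
# parameter by (1 - lr * coeff) before the plain Adam update; the helper
# below is an illustrative sketch under that assumption, not the MLU
# kernel's exact code path, and is not called by the tests.
def _adamw_decay_sketch(param, lr, coeff, with_decay=True):
    """Apply the decoupled weight-decay factor assumed for AdamW (sketch)."""
    if not with_decay:
        return param
    return param * (1.0 - lr * coeff)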
class TestAdamOpWithSkipUpdate(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "adamw"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
"SkipUpdate": np.array([True]).astype("bool"),
}
self.attrs = {'epsilon': epsilon, "coeff": 0.02, "with_decay": True}
self.outputs = {
'Moment1Out': moment1,
'Moment2Out': moment2,
'ParamOut': param,
'Beta1PowOut': self.inputs['Beta1Pow'],
'Beta2PowOut': self.inputs['Beta2Pow'],
}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestAdamOpWithoutDecay(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "adamw"
param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
# The second moment is positive
moment2 = np.random.random((102, 105)).astype("float32")
learning_rate = 0.004
beta1 = 0.78
beta2 = 0.836
epsilon = 1e-4
beta1_pow = beta1**10
beta2_pow = beta2**10
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32"),
'Beta1Tensor': np.array([beta1]).astype("float32"),
'Beta2Tensor': np.array([beta2]).astype("float32"),
'EpsilonTensor': np.array([epsilon]).astype("float32"),
"SkipUpdate": np.array([True]).astype("bool"),
}
self.attrs = {'epsilon': epsilon, "coeff": 0.02, "with_decay": False}
self.outputs = {
'Moment1Out': moment1,
'Moment2Out': moment2,
'ParamOut': param,
'Beta1PowOut': self.inputs['Beta1Pow'],
'Beta2PowOut': self.inputs['Beta2Pow'],
}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestNet(unittest.TestCase):
def _test(self, run_mlu=True):
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
main_prog.random_seed = SEED
startup_prog.random_seed = SEED
np.random.seed(SEED)
a_np = np.random.random(size=(32, 32)).astype('float32')
b_np = np.random.random(size=(32, 32)).astype('float32')
label_np = np.random.randint(2, size=(32, 1)).astype('int64')
with paddle.static.program_guard(main_prog, startup_prog):
a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
label = paddle.static.data(
name="label", shape=[32, 1], dtype='int64'
)
sum = paddle.add(a, b)
z = paddle.pow(sum, 2.0)
fc_1 = paddle.static.nn.fc(x=z, size=128)
prediction = paddle.static.nn.fc(x=fc_1, size=2, activation='softmax')
cost = paddle.nn.functional.cross_entropy(
input=prediction, label=label, reduction='none', use_softmax=False
)
loss = paddle.mean(cost)
adam = paddle.optimizer.AdamW(learning_rate=0.01, weight_decay=0.02)
adam.minimize(loss)
if run_mlu:
place = paddle.device.MLUPlace(0)
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)
print("Start run on {}".format(place))
for epoch in range(100):
pred_res, loss_res = exe.run(
main_prog,
feed={"a": a_np, "b": b_np, "label": label_np},
fetch_list=[prediction, loss],
)
if epoch % 10 == 0:
print(
"Epoch {} | Prediction[0]: {}, Loss: {}".format(
epoch, pred_res[0], loss_res
)
)
return pred_res, loss_res
def test_mlu(self):
mlu_pred, mlu_loss = self._test(True)
cpu_pred, cpu_loss = self._test(False)
np.testing.assert_allclose(mlu_pred, cpu_pred, rtol=1e-3)
np.testing.assert_allclose(mlu_loss, cpu_loss, rtol=1e-3)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
paddle.enable_static()
SEED = 2022
class TestCheckFiniteAndUnscaleOp(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "check_finite_and_unscale"
self.init_dtype()
self.init_test_case()
def init_test_case(self):
x = np.random.random((129, 129)).astype(self.dtype)
scale = np.random.random((1)).astype(self.dtype)
self.inputs = {'X': [('x0', x)], 'Scale': scale}
self.outputs = {
'FoundInfinite': np.array([0]),
'Out': [('out0', x / scale)],
}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place)
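# Taken together, the tests below expect the following semantics from
# check_finite_and_unscale: every input tensor is divided by Scale, and
# FoundInfinite is raised when any element is nan or inf (in which case the
# unscaled outputs are not checked). The helper below is a plain numpy sketch
# of that contract, not the MLU kernel itself, and is not used by the tests.
def _check_finite_and_unscale_sketch(xs, scale):
    """Return (outs, found_infinite) for a list of numpy arrays (sketch)."""
    found_infinite = any(not np.all(np.isfinite(x)) for x in xs)
    outs = [x / scale for x in xs]
    return outs, found_infinite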
class TestCheckFiniteAndUnscaleOpWithNan(TestCheckFiniteAndUnscaleOp):
def init_test_case(self):
x = np.random.random((129, 129)).astype(self.dtype)
x[128][128] = np.nan
scale = np.random.random((1)).astype(self.dtype)
self.inputs = {'X': [('x0', x)], 'Scale': scale}
self.outputs = {
'FoundInfinite': np.array([1]),
'Out': [('out0', x)],
}
def test_check_output(self):
# When input contains nan, do not check the output,
# since the output may be nondeterministic and will be discarded.
self.check_output_with_place(self.place, no_check_set=['Out'])
class TestCheckFiniteAndUnscaleOpWithInf(TestCheckFiniteAndUnscaleOp):
def init_test_case(self):
x = np.random.random((129, 129)).astype(self.dtype)
x[128][128] = np.inf
scale = np.random.random((1)).astype(self.dtype)
self.inputs = {'X': [('x0', x)], 'Scale': scale}
self.outputs = {
'FoundInfinite': np.array([1]),
'Out': [('out0', x)],
}
def test_check_output(self):
# When input contains inf, do not check the output,
# since the output may be nondeterministic and will be discarded.
self.check_output_with_place(self.place, no_check_set=['Out'])
class TestCheckFiniteAndUnscaleOpMultiInput(TestCheckFiniteAndUnscaleOp):
def init_test_case(self):
x0 = np.random.random((129, 129)).astype(self.dtype)
x1 = np.random.random((129, 129)).astype(self.dtype)
scale = np.random.random((1)).astype(self.dtype)
self.inputs = {'X': [('x0', x0), ('x1', x1)], 'Scale': scale}
self.outputs = {
'FoundInfinite': np.array([0]),
'Out': [('out0', x0 / scale), ('out1', x1 / scale)],
}
class TestCheckFiniteAndUnscaleOpMultiInputWithNan(TestCheckFiniteAndUnscaleOp):
def init_test_case(self):
x0 = np.random.random((129, 129)).astype(self.dtype)
x0[128][128] = np.nan
x1 = np.random.random((129, 129)).astype(self.dtype)
scale = np.random.random((1)).astype(self.dtype)
self.inputs = {'X': [('x0', x0), ('x1', x1)], 'Scale': scale}
self.outputs = {
'FoundInfinite': np.array([1]),
'Out': [('out0', x0 / scale), ('out1', x1 / scale)],
}
def test_check_output(self):
# When input contains nan, do not check the output,
# since the output may be nondeterministic and will be discarded.
self.check_output_with_place(self.place, no_check_set=['Out'])
class TestCheckFiniteAndUnscaleOpMultiInputWithInf(TestCheckFiniteAndUnscaleOp):
def init_test_case(self):
x0 = np.random.random((129, 129)).astype(self.dtype)
x0[128][128] = np.nan
x1 = np.random.random((129, 129)).astype(self.dtype)
x1[128][128] = np.inf
scale = np.random.random((1)).astype(self.dtype)
self.inputs = {'X': [('x0', x0), ('x1', x1)], 'Scale': scale}
self.outputs = {
'FoundInfinite': np.array([1]),
'Out': [('out0', x0 / scale), ('out1', x1 / scale)],
}
def test_check_output(self):
# When input contains inf, do not check the output,
# since the output may be nondeterministic and will be discarded.
self.check_output_with_place(self.place, no_check_set=['Out'])
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
paddle.enable_static()
class BaseTestCase(OpTest):
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.MLUPlace(0)
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
def setUp(self):
self.set_mlu()
self.initTestCase()
self.x = (1000 * np.random.random(self.dims)).astype(self.dtype)
self.inputs = {'X': self.x}
self.attrs = {'axis': self.axis}
self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}
def test_check_output(self):
self.check_output_with_place(self.place)
class TestArgMaxSameValue1(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dtype = 'float32'
self.axis = 0
def setUp(self):
self.set_mlu()
self.initTestCase()
self.x = np.array([1, 2, 3, 5, 4, 5]).astype(self.dtype)
self.inputs = {'X': self.x}
self.attrs = {'axis': self.axis}
self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}
class TestArgMaxSameValue2(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dtype = 'float16'
self.axis = 0
def setUp(self):
self.set_mlu()
self.initTestCase()
self.x = np.array([[2, 3, 5, 5], [3, 2, 5, 5]]).astype(self.dtype)
self.inputs = {'X': self.x}
self.attrs = {'axis': self.axis}
self.outputs = {'Out': np.argmax(self.x, axis=self.axis)}
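# Note: when the maximum value occurs more than once, as in the two cases
# above, np.argmax returns the index of the first occurrence along the axis,
# and the MLU kernel is expected to match that tie-breaking rule.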
# test argmax, dtype: float16
class TestArgMaxFloat16Case1(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = -1
class TestArgMaxFloat16Case2(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = 0
class TestArgMaxFloat16Case3(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = 1
class TestArgMaxFloat16Case4(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float16'
self.axis = 2
class TestArgMaxFloat16Case5(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float16'
self.axis = -1
class TestArgMaxFloat16Case6(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float16'
self.axis = 0
class TestArgMaxFloat16Case7(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float16'
self.axis = 1
class TestArgMaxFloat16Case8(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (1,)
self.dtype = 'float16'
self.axis = 0
class TestArgMaxFloat16Case9(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (2,)
self.dtype = 'float16'
self.axis = 0
class TestArgMaxFloat16Case10(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3,)
self.dtype = 'float16'
self.axis = 0
# test argmax, dtype: float32
class TestArgMaxFloat32Case1(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = -1
class TestArgMaxFloat32Case2(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
class TestArgMaxFloat32Case3(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 1
class TestArgMaxFloat32Case4(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 2
class TestArgMaxFloat32Case5(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = -1
class TestArgMaxFloat32Case6(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = 0
class TestArgMaxFloat32Case7(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3, 4)
self.dtype = 'float32'
self.axis = 1
class TestArgMaxFloat32Case8(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (1,)
self.dtype = 'float32'
self.axis = 0
class TestArgMaxFloat32Case9(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (2,)
self.dtype = 'float32'
self.axis = 0
class TestArgMaxFloat32Case10(BaseTestCase):
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (3,)
self.dtype = 'float32'
self.axis = 0
class BaseTestComplex1_1(OpTest):
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.MLUPlace(0)
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (4, 5, 6)
self.dtype = 'float32'
self.axis = 2
def setUp(self):
self.set_mlu()
self.initTestCase()
self.x = (np.random.random(self.dims)).astype(self.dtype)
self.inputs = {'X': self.x}
self.attrs = {
'axis': self.axis,
'dtype': int(core.VarDesc.VarType.INT32),
}
self.outputs = {
'Out': np.argmax(self.x, axis=self.axis).astype("int32")
}
def test_check_output(self):
self.check_output_with_place(self.place)
class BaseTestComplex1_2(OpTest):
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.MLUPlace(0)
def initTestCase(self):
self.op_type = 'arg_max'
self.dims = (4, 5, 6)
self.dtype = 'float16'
self.axis = 2
def setUp(self):
self.set_mlu()
self.initTestCase()
self.x = (np.random.random(self.dims)).astype(self.dtype)
self.inputs = {'X': self.x}
self.attrs = {
'axis': self.axis,
'dtype': int(core.VarDesc.VarType.INT32),
}
self.outputs = {
'Out': np.argmax(self.x, axis=self.axis).astype("int32")
}
def test_check_output(self):
self.check_output_with_place(self.place)
class TestArgMaxAPI(unittest.TestCase):
def initTestCase(self):
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
def setUp(self):
self.initTestCase()
self.__class__.use_mlu = True
self.place = [paddle.MLUPlace(0)]
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
np.random.seed(2022)
numpy_input = (np.random.random(self.dims)).astype(self.dtype)
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input, axis=self.axis)
paddle_output = paddle.argmax(tensor_input, axis=self.axis)
np.testing.assert_allclose(
numpy_output, paddle_output.numpy(), rtol=1e-05
)
paddle.enable_static()
for place in self.place:
run(place)
class TestArgMaxAPI_2(unittest.TestCase):
def initTestCase(self):
self.dims = (3, 4, 5)
self.dtype = 'float32'
self.axis = 0
self.keep_dims = True
def setUp(self):
self.initTestCase()
self.__class__.use_mlu = True
self.place = [paddle.MLUPlace(0)]
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
np.random.seed(2022)
numpy_input = (np.random.random(self.dims)).astype(self.dtype)
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input, axis=self.axis).reshape(
1, 4, 5
)
paddle_output = paddle.argmax(
tensor_input, axis=self.axis, keepdim=self.keep_dims
)
np.testing.assert_allclose(
numpy_output, paddle_output.numpy(), rtol=1e-05
)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
for place in self.place:
run(place)
class TestArgMaxAPI_3(unittest.TestCase):
def initTestCase(self):
self.dims = (1, 9)
self.dtype = 'float32'
def setUp(self):
self.initTestCase()
self.__class__.use_mlu = True
self.place = [paddle.MLUPlace(0)]
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
np.random.seed(2022)
numpy_input = (np.random.random(self.dims)).astype(self.dtype)
tensor_input = paddle.to_tensor(numpy_input)
numpy_output = np.argmax(numpy_input).reshape([1])
paddle_output = paddle.argmax(tensor_input)
np.testing.assert_allclose(
numpy_output, paddle_output.numpy(), rtol=1e-05
)
self.assertEqual(numpy_output.shape, paddle_output.numpy().shape)
paddle.enable_static()
for place in self.place:
run(place)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
paddle.enable_static()
SEED = 2022
def gen_test_class(dtype, axis, descending):
class TestArgsortOp(OpTest):
def setUp(self):
np.random.seed(SEED)
self.set_mlu()
self.op_type = "argsort"
self.place = paddle.MLUPlace(0)
self.init_inputshape()
if 'int' in dtype:
self.x = np.random.choice(255, self.size, replace=False)
self.x = self.x.reshape(self.input_shape).astype(dtype)
else:
self.x = np.random.random(self.input_shape).astype(dtype)
self.inputs = {"X": self.x}
self.attrs = {"axis": axis, "descending": descending}
self.get_output()
self.outputs = {"Out": self.sorted_x, "Indices": self.indices}
def get_output(self):
if descending:
self.indices = np.flip(
np.argsort(self.x, kind='heapsort', axis=axis), axis
)
self.sorted_x = np.flip(
np.sort(self.x, kind='heapsort', axis=axis), axis
)
else:
self.indices = np.argsort(self.x, kind='heapsort', axis=axis)
self.sorted_x = np.sort(self.x, kind='heapsort', axis=axis)
def test_check_grad(self):
if dtype in ['float16', 'int8', 'uint8', 'int32']:
self.__class__.no_need_check_grad = True
else:
self.check_grad_with_place(self.place, ["X"], "Out")
def set_mlu(self):
self.__class__.use_mlu = True
def init_inputshape(self):
self.input_shape = (5, 2, 2, 3, 3)
self.size = np.prod(self.input_shape)
def test_check_output(self):
self.check_output_with_place(self.place)
def init_direction(self):
self.descending = False
cls_name = "{}_{}_{}_TestArgsortOp".format(dtype, axis, descending)
TestArgsortOp.__name__ = cls_name
globals()[cls_name] = TestArgsortOp
for dtype in ['float32', 'float16', 'int8', 'uint8', 'int32']:
for axis in [1, 2, 3, -1]:
for descending in [False]:
gen_test_class(dtype, axis, descending)
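# The loop above registers one OpTest subclass per (dtype, axis, descending)
# combination via globals(), so unittest discovery sees each combination as a
# separate test case. As an illustration of the naming scheme only (this call
# is hypothetical and descending=True is not part of the sweep),
# gen_test_class('float32', -1, True) would create a class named
# "float32_-1_True_TestArgsortOp".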
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
paddle.enable_static()
SEED = 2022
class TestAssign(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "assign"
self.init_dtype()
x = np.random.random([3, 3]).astype(self.dtype)
self.inputs = {'X': x}
self.attrs = {}
self.outputs = {'Out': x}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import sys
sys.path.append("..")
import eager_op_test
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
paddle.enable_static()
numpy.random.seed(2022)
class TestAssignValueMLUOp(eager_op_test.OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "assign_value"
self.inputs = {}
self.attrs = {}
self.init_data()
self.attrs["shape"] = self.value.shape
self.attrs["dtype"] = framework.convert_np_dtype_to_dtype_(
self.value.dtype
)
self.outputs = {"Out": self.value}
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
def init_data(self):
self.value = numpy.random.random(size=(2, 5)).astype(numpy.float32)
self.attrs["fp32_values"] = [float(v) for v in self.value.flat]
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAssignValueMLUOp2(TestAssignValueMLUOp):
def init_data(self):
self.value = numpy.random.random(size=(2, 5)).astype(numpy.int32)
self.attrs["int32_values"] = [int(v) for v in self.value.flat]
class TestAssignValueMLUOp3(TestAssignValueMLUOp):
def init_data(self):
self.value = numpy.random.random(size=(2, 5)).astype(numpy.int64)
self.attrs["int64_values"] = [int(v) for v in self.value.flat]
class TestAssignValueMLUOp4(TestAssignValueMLUOp):
def init_data(self):
self.value = numpy.random.choice(a=[False, True], size=(2, 5)).astype(
bool
)
self.attrs["bool_values"] = [int(v) for v in self.value.flat]
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import sys
sys.path.append("..")
from eager_op_test import OpTest, _set_use_system_allocator
from paddle.fluid.framework import grad_var_name
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import paddle
paddle.enable_static()
class TestBatchNorm(unittest.TestCase):
def test_name(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_mlu():
places.append(fluid.MLUPlace(0))
for p in places:
with fluid.dygraph.guard(p):
batch_norm1d = paddle.nn.BatchNorm1D(1, name="test")
def test_error(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_mlu():
places.append(fluid.MLUPlace(0))
for p in places:
# paddle.disable_static()
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
def error1d_dataformat():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm1d = paddle.nn.BatchNorm1D(1, data_format='NCDHW')
batch_norm1d(fluid.dygraph.to_variable(x_data_4))
def error2d_dataformat():
x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
batch_norm2d = paddle.nn.BatchNorm2D(1, data_format='NCDHW')
batch_norm2d(fluid.dygraph.to_variable(x_data_3))
def error3d_dataformat():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm3d = paddle.nn.BatchNorm3D(1, data_format='NCL')
batch_norm3d(fluid.dygraph.to_variable(x_data_4))
def error1d():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm1d = paddle.nn.BatchNorm1D(1)
batch_norm1d(fluid.dygraph.to_variable(x_data_4))
def error2d():
x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
batch_norm2d = paddle.nn.BatchNorm2D(1)
batch_norm2d(fluid.dygraph.to_variable(x_data_3))
def error3d():
x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
batch_norm3d = paddle.nn.BatchNorm3D(1)
batch_norm3d(fluid.dygraph.to_variable(x_data_4))
with fluid.dygraph.guard(p):
self.assertRaises(ValueError, error1d)
self.assertRaises(ValueError, error2d)
self.assertRaises(ValueError, error3d)
self.assertRaises(ValueError, error1d_dataformat)
self.assertRaises(ValueError, error2d_dataformat)
self.assertRaises(ValueError, error3d_dataformat)
def test_dygraph(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_mlu():
places.append(fluid.MLUPlace(0))
for p in places:
shape = [4, 10, 4, 4]
def compute_v1(x, is_test, trainable_statistics):
with fluid.dygraph.guard(p):
bn = paddle.nn.BatchNorm(
shape[1],
is_test=is_test,
trainable_statistics=trainable_statistics,
)
y = bn(fluid.dygraph.to_variable(x))
return y.numpy()
def compute_v2(x):
with fluid.dygraph.guard(p):
bn = paddle.nn.BatchNorm2D(shape[1])
y = bn(fluid.dygraph.to_variable(x))
return y.numpy()
def compute_v3(x, is_test, trainable_statistics):
with fluid.dygraph.guard(p):
bn = paddle.nn.BatchNorm(
shape[1],
is_test=is_test,
param_attr=fluid.ParamAttr(
initializer=paddle.nn.initializer.Constant(1.0),
trainable=False,
),
bias_attr=fluid.ParamAttr(
initializer=paddle.nn.initializer.Constant(0.0),
trainable=False,
),
trainable_statistics=trainable_statistics,
)
y = bn(fluid.dygraph.to_variable(x))
return y.numpy()
def compute_v4(x):
with fluid.dygraph.guard(p):
bn = paddle.nn.BatchNorm2D(
shape[1], weight_attr=False, bias_attr=False
)
y = bn(fluid.dygraph.to_variable(x))
return y.numpy()
x = np.random.randn(*shape).astype("float32")
y1 = compute_v1(x, False, False)
y2 = compute_v2(x)
y3 = compute_v3(x, False, False)
y4 = compute_v4(x)
np.testing.assert_allclose(y1, y2)
np.testing.assert_allclose(y3, y4)
def test_static(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_mlu():
places.append(fluid.MLUPlace(0))
for p in places:
exe = fluid.Executor(p)
shape = [4, 10, 16, 16]
def compute_v1(x_np, is_test, trainable_statistics):
with program_guard(Program(), Program()):
bn = paddle.nn.BatchNorm(
shape[1],
is_test=is_test,
trainable_statistics=trainable_statistics,
)
x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = bn(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
return r
def compute_v2(x_np):
with program_guard(Program(), Program()):
bn = paddle.nn.BatchNorm2D(shape[1])
x = paddle.static.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = bn(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
return r
x = np.random.randn(*shape).astype("float32")
y1 = compute_v1(x, False, False)
y2 = compute_v2(x)
np.testing.assert_allclose(y1, y2)
class TestBatchNormChannelLast(unittest.TestCase):
def setUp(self):
self.original_dtype = paddle.get_default_dtype()
paddle.set_default_dtype("float32")
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_mlu():
self.places.append(fluid.MLUPlace(0))
def tearDown(self):
paddle.set_default_dtype(self.original_dtype)
def test_1d(self):
for p in self.places:
with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 4])
net1 = paddle.nn.BatchNorm1D(4, data_format="NLC")
net2 = paddle.nn.BatchNorm1D(4)
net2.weight = net1.weight
net2.bias = net1.bias
y1 = net1(x)
channel_first_x = paddle.transpose(x, [0, 2, 1])
y2 = net2(channel_first_x)
y2 = paddle.transpose(y2, [0, 2, 1])
np.testing.assert_allclose(
y1.numpy(), y2.numpy(), rtol=1e-05, atol=1e-07
)
def test_2d(self):
for p in self.places:
with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 6, 4])
net1 = paddle.nn.BatchNorm2D(4, data_format="NHWC")
net2 = paddle.nn.BatchNorm2D(4)
net2.weight = net1.weight
net2.bias = net1.bias
y1 = net1(x)
channel_first_x = paddle.transpose(x, [0, 3, 1, 2])
y2 = net2(channel_first_x)
y2 = paddle.transpose(y2, [0, 2, 3, 1])
np.testing.assert_allclose(
y1.numpy(), y2.numpy(), rtol=1e-05, atol=1e-07
)
def test_3d(self):
for p in self.places:
with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 6, 6, 4])
net1 = paddle.nn.BatchNorm3D(4, data_format="NDHWC")
net2 = paddle.nn.BatchNorm3D(4)
net2.weight = net1.weight
net2.bias = net1.bias
y1 = net1(x)
channel_first_x = paddle.transpose(x, [0, 4, 1, 2, 3])
y2 = net2(channel_first_x)
y2 = paddle.transpose(y2, [0, 2, 3, 4, 1])
np.testing.assert_allclose(
y1.numpy(), y2.numpy(), rtol=1e-05, atol=1e-07
)
# res = np.allclose(y1.numpy(), y2.numpy())
# if res == False:
# np.savetxt("./y1.txt", y1.numpy().flatten(), fmt='%.10f', delimiter='\n')
# np.savetxt("./y2.txt", y2.numpy().flatten(), fmt='%.10f', delimiter='\n')
# self.assertEqual(res, True)
class TestBatchNormUseGlobalStats(unittest.TestCase):
def setUp(self):
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_mlu():
self.places.append(fluid.MLUPlace(0))
self.init_test()
### train mode
def init_test(self):
self.use_global_stats = True
self.trainable_statistics = False
def test_global_stats(self):
for p in self.places:
with fluid.dygraph.guard(p):
x = paddle.randn([2, 6, 6, 4])
net1 = paddle.nn.BatchNorm(
6,
param_attr=fluid.ParamAttr(
initializer=paddle.nn.initializer.Constant(1.0)
),
use_global_stats=self.use_global_stats,
trainable_statistics=self.trainable_statistics,
)
net2 = paddle.nn.BatchNorm2D(
6, use_global_stats=self.use_global_stats
)
net2.weight = net1.weight
net2.bias = net1.bias
if self.trainable_statistics == True:
net1.training = False
net2.training = False
y1 = net1(x)
y2 = net2(x)
np.testing.assert_allclose(y1.numpy(), y2.numpy(), rtol=1e-05)
class TestBatchNormUseGlobalStatsCase1(TestBatchNormUseGlobalStats):
### test mode
def init_test(self):
self.use_global_stats = False
self.trainable_statistics = True
class TestBatchNormUseGlobalStatsCase2(TestBatchNormUseGlobalStats):
### train mode
def init_test(self):
self.use_global_stats = False
self.trainable_statistics = False
class TestBatchNormUseGlobalStatsCase3(TestBatchNormUseGlobalStats):
### test mode
def init_test(self):
self.use_global_stats = True
self.trainable_statistics = True
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
import sys
sys.path.append('..')
from eager_op_test import OpTest
paddle.enable_static()
def test_static_layer(
place, input_np, label_np, reduction='mean', weight_np=None
):
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.static.data(
name='input', shape=input_np.shape, dtype='float32'
)
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
if weight_np is not None:
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
bce_loss = paddle.nn.loss.BCELoss(
weight=weight, reduction=reduction
)
else:
bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
res = bce_loss(input, label)
exe = paddle.static.Executor(place)
static_result = exe.run(
prog,
feed={"input": input_np, "label": label_np}
if weight_np is None
else {"input": input_np, "label": label_np, "weight": weight_np},
fetch_list=[res],
)
return static_result[0]
def test_static_functional(
place, input_np, label_np, reduction='mean', weight_np=None
):
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
input = paddle.static.data(
name='input', shape=input_np.shape, dtype='float32'
)
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
if weight_np is not None:
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
res = paddle.nn.functional.binary_cross_entropy(
input, label, weight=weight, reduction=reduction
)
else:
res = paddle.nn.functional.binary_cross_entropy(
input, label, reduction=reduction
)
exe = paddle.static.Executor(place)
static_result = exe.run(
prog,
feed={"input": input_np, "label": label_np}
if weight_np is None
else {"input": input_np, "label": label_np, "weight": weight_np},
fetch_list=[res],
)
return static_result[0]
def test_dygraph_layer(
place, input_np, label_np, reduction='mean', weight_np=None
):
paddle.disable_static()
if weight_np is not None:
weight = paddle.to_tensor(weight_np)
bce_loss = paddle.nn.loss.BCELoss(weight=weight, reduction=reduction)
else:
bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
dy_res = bce_loss(paddle.to_tensor(input_np), paddle.to_tensor(label_np))
dy_result = dy_res.numpy()
paddle.enable_static()
return dy_result
def test_dygraph_functional(
place, input_np, label_np, reduction='mean', weight_np=None
):
paddle.disable_static()
input = paddle.to_tensor(input_np)
label = paddle.to_tensor(label_np)
if weight_np is not None:
weight = paddle.to_tensor(weight_np)
dy_res = paddle.nn.functional.binary_cross_entropy(
input, label, weight=weight, reduction=reduction
)
else:
dy_res = paddle.nn.functional.binary_cross_entropy(
input, label, reduction=reduction
)
dy_result = dy_res.numpy()
paddle.enable_static()
return dy_result
def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None):
if weight_np is None:
expected = -1 * (
label_np * np.log(input_np)
+ (1.0 - label_np) * np.log(1.0 - input_np)
)
else:
expected = (
-1
* weight_np
* (
label_np * np.log(input_np)
+ (1.0 - label_np) * np.log(1.0 - input_np)
)
)
if reduction == 'mean':
expected = np.mean(expected)
elif reduction == 'sum':
expected = np.sum(expected)
else:
expected = expected
return expected
class TestBCELoss(unittest.TestCase):
def test_BCELoss(self):
input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float32)
label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float32)
places = [fluid.MLUPlace(0)]
reductions = ['sum', 'mean', 'none']
for place in places:
for reduction in reductions:
static_result = test_static_layer(
place, input_np, label_np, reduction
)
dy_result = test_dygraph_layer(
place, input_np, label_np, reduction
)
expected = calc_bceloss(input_np, label_np, reduction)
np.testing.assert_allclose(static_result, expected, rtol=1e-6)
np.testing.assert_allclose(static_result, dy_result)
np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static_functional(
place, input_np, label_np, reduction
)
dy_functional = test_dygraph_functional(
place, input_np, label_np, reduction
)
np.testing.assert_allclose(
static_functional, expected, rtol=1e-6
)
np.testing.assert_allclose(static_functional, dy_functional)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCELoss_weight(self):
input_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype(
np.float32
)
label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype(
np.float32
)
weight_np = np.random.random(size=(3, 4, 10)).astype(np.float32)
place = fluid.MLUPlace(0)
for reduction in ['sum', 'mean', 'none']:
static_result = test_static_layer(
place, input_np, label_np, reduction, weight_np=weight_np
)
dy_result = test_dygraph_layer(
place, input_np, label_np, reduction, weight_np=weight_np
)
expected = calc_bceloss(
input_np, label_np, reduction, weight_np=weight_np
)
np.testing.assert_allclose(static_result, expected, rtol=1e-6)
np.testing.assert_allclose(static_result, dy_result)
np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static_functional(
place, input_np, label_np, reduction, weight_np=weight_np
)
dy_functional = test_dygraph_functional(
place, input_np, label_np, reduction, weight_np=weight_np
)
np.testing.assert_allclose(static_functional, expected, rtol=1e-6)
np.testing.assert_allclose(static_functional, dy_functional)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCELoss_error(self):
paddle.disable_static()
self.assertRaises(
ValueError, paddle.nn.loss.BCELoss, reduction="unsupport reduction"
)
input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
label = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
self.assertRaises(
ValueError,
paddle.nn.functional.binary_cross_entropy,
input=input,
label=label,
reduction="unsupport reduction",
)
paddle.enable_static()
def bce_loss(input, label):
return -1 * (label * np.log(input) + (1.0 - label) * np.log(1.0 - input))
class TestBceLossOp(OpTest):
def setUp(self):
self.init_test_case()
self.op_type = "bce_loss"
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
input_np = np.random.uniform(0.1, 0.8, self.shape).astype("float32")
label_np = np.random.randint(0, 2, self.shape).astype("float32")
output_np = bce_loss(input_np, label_np)
self.inputs = {'X': input_np, 'Label': label_np}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
def init_test_case(self):
self.shape = [10, 10]
class TestBceLossOpCase1(TestBceLossOp):
def init_test_case(self):
self.shape = [2, 3, 4, 5]
class TestBceLossOpCase2(TestBceLossOp):
def init_test_case(self):
self.shape = [2, 3, 20]
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
import sys
sys.path.append('..')
from eager_op_test import OpTest
from test_bce_with_logits_loss import (
call_bce_layer,
call_bce_functional,
test_dygraph,
calc_bce_with_logits_loss,
)
def test_static(
place,
logit_np,
label_np,
weight_np=None,
reduction='mean',
pos_weight_np=None,
functional=False,
):
paddle.enable_static()
prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(prog, startup_prog):
logit = paddle.static.data(
name='logit', shape=logit_np.shape, dtype='float32'
)
label = paddle.static.data(
name='label', shape=label_np.shape, dtype='float32'
)
feed_dict = {"logit": logit_np, "label": label_np}
pos_weight = None
weight = None
if pos_weight_np is not None:
pos_weight = paddle.static.data(
name='pos_weight', shape=pos_weight_np.shape, dtype='float32'
)
feed_dict["pos_weight"] = pos_weight_np
if weight_np is not None:
weight = paddle.static.data(
name='weight', shape=weight_np.shape, dtype='float32'
)
feed_dict["weight"] = weight_np
if functional:
res = call_bce_functional(
logit, label, weight, reduction, pos_weight
)
else:
res = call_bce_layer(logit, label, weight, reduction, pos_weight)
exe = paddle.static.Executor(place)
static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])
return static_result[0]
paddle.enable_static()
class TestBCEWithLogitsLoss(unittest.TestCase):
def test_BCEWithLogitsLoss(self):
logit_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float32)
label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float32)
places = [fluid.MLUPlace(0)]
reductions = ['sum', 'mean', 'none']
for place in places:
for reduction in reductions:
static_result = test_static(
place, logit_np, label_np, reduction=reduction
)
dy_result = test_dygraph(
place, logit_np, label_np, reduction=reduction
)
expected = calc_bce_with_logits_loss(
logit_np, label_np, reduction
)
np.testing.assert_allclose(static_result, expected, rtol=1e-6)
np.testing.assert_allclose(static_result, dy_result)
np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static(
place,
logit_np,
label_np,
reduction=reduction,
functional=True,
)
dy_functional = test_dygraph(
place,
logit_np,
label_np,
reduction=reduction,
functional=True,
)
np.testing.assert_allclose(
static_functional, expected, rtol=1e-6
)
np.testing.assert_allclose(static_functional, dy_functional)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCEWithLogitsLoss_weight(self):
logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype(
np.float32
)
label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype(
np.float32
)
weight_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float32)
place = fluid.MLUPlace(0)
for reduction in ['sum', 'mean', 'none']:
static_result = test_static(
place,
logit_np,
label_np,
weight_np=weight_np,
reduction=reduction,
)
dy_result = test_dygraph(
place,
logit_np,
label_np,
weight_np=weight_np,
reduction=reduction,
)
expected = calc_bce_with_logits_loss(
logit_np, label_np, reduction, weight_np=weight_np
)
np.testing.assert_allclose(static_result, expected, rtol=1e-6)
np.testing.assert_allclose(static_result, dy_result)
np.testing.assert_allclose(dy_result, expected, rtol=1e-6)
static_functional = test_static(
place,
logit_np,
label_np,
weight_np=weight_np,
reduction=reduction,
functional=True,
)
dy_functional = test_dygraph(
place,
logit_np,
label_np,
weight_np=weight_np,
reduction=reduction,
functional=True,
)
np.testing.assert_allclose(static_functional, expected, rtol=1e-6)
np.testing.assert_allclose(static_functional, dy_functional)
np.testing.assert_allclose(dy_functional, expected, rtol=1e-6)
def test_BCEWithLogitsLoss_pos_weight(self):
logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype(
np.float32
)
label_np = np.random.randint(0, 2, size=(2, 3, 4, 10)).astype(
np.float32
)
pos_weight_np = np.random.random(size=(3, 4, 10)).astype(np.float32)
weight_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float32)
place = fluid.MLUPlace(0)
reduction = "mean"
static_result = test_static(
place, logit_np, label_np, weight_np, reduction, pos_weight_np
)
dy_result = test_dygraph(
place, logit_np, label_np, weight_np, reduction, pos_weight_np
)
expected = calc_bce_with_logits_loss(
logit_np, label_np, reduction, weight_np, pos_weight_np
)
np.testing.assert_allclose(static_result, expected)
np.testing.assert_allclose(static_result, dy_result)
np.testing.assert_allclose(dy_result, expected)
static_functional = test_static(
place,
logit_np,
label_np,
weight_np,
reduction,
pos_weight_np,
functional=True,
)
dy_functional = test_dygraph(
place,
logit_np,
label_np,
weight_np,
reduction,
pos_weight_np,
functional=True,
)
np.testing.assert_allclose(static_functional, expected)
np.testing.assert_allclose(static_functional, dy_functional)
np.testing.assert_allclose(dy_functional, expected)
def test_BCEWithLogitsLoss_error(self):
paddle.disable_static()
self.assertRaises(
ValueError,
paddle.nn.BCEWithLogitsLoss,
reduction="unsupport reduction",
)
logit = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
label = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
self.assertRaises(
ValueError,
paddle.nn.functional.binary_cross_entropy_with_logits,
logit=logit,
label=label,
reduction="unsupport reduction",
)
paddle.enable_static()
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append('..')
from eager_op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.nn.functional import interpolate
import paddle
paddle.enable_static()
def bilinear_interp_np(
input,
out_h,
out_w,
scale_w=0,
scale_h=0,
out_size=None,
actual_shape=None,
align_corners=True,
align_mode=0,
data_layout='NCHW',
):
"""bilinear interpolation implement in shape [N, C, H, W]"""
if data_layout == "NHWC":
input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW
if out_size is not None:
out_h = out_size[0]
out_w = out_size[1]
if actual_shape is not None:
out_h = actual_shape[0]
out_w = actual_shape[1]
batch_size, channel, in_h, in_w = input.shape
ratio_h = ratio_w = 0.0
if out_h > 1:
if align_corners:
ratio_h = (in_h - 1.0) / (out_h - 1.0)
else:
if scale_h > 0:
ratio_h = 1.0 / scale_h
else:
ratio_h = 1.0 * in_h / out_h
if out_w > 1:
if align_corners:
ratio_w = (in_w - 1.0) / (out_w - 1.0)
else:
if scale_w > 0:
ratio_w = 1.0 / scale_w
else:
ratio_w = 1.0 * in_w / out_w
out = np.zeros((batch_size, channel, out_h, out_w))
for i in range(out_h):
if align_mode == 0 and not align_corners:
h = int(ratio_h * (i + 0.5) - 0.5)
else:
h = int(ratio_h * i)
h = max(0, h)
hid = 1 if h < in_h - 1 else 0
if align_mode == 0 and not align_corners:
idx_src_h = max(ratio_h * (i + 0.5) - 0.5, 0)
h1lambda = idx_src_h - h
else:
h1lambda = ratio_h * i - h
h2lambda = 1.0 - h1lambda
for j in range(out_w):
if align_mode == 0 and not align_corners:
w = int(ratio_w * (j + 0.5) - 0.5)
else:
w = int(ratio_w * j)
w = max(0, w)
wid = 1 if w < in_w - 1 else 0
if align_mode == 0 and not align_corners:
idx_src_w = max(ratio_w * (j + 0.5) - 0.5, 0)
w1lambda = idx_src_w - w
else:
w1lambda = ratio_w * j - w
w2lambda = 1.0 - w1lambda
out[:, :, i, j] = h2lambda * (
w2lambda * input[:, :, h, w]
+ w1lambda * input[:, :, h, w + wid]
) + h1lambda * (
w2lambda * input[:, :, h + hid, w]
+ w1lambda * input[:, :, h + hid, w + wid]
)
if data_layout == "NHWC":
out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC
return out.astype(input.dtype)
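# Illustrative sketch (editorial addition, not part of the original test): with
# align_corners=True the numpy reference above preserves the corner pixels and
# linearly interpolates in between, e.g. upsampling a 2x2 map to 3x3. Kept as
# an uncalled helper so it does not affect the tests.
def _bilinear_interp_np_example():
    x = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)  # [[0, 1], [2, 3]]
    # The four corners stay 0, 1, 2, 3; the centre becomes 1.5.
    return bilinear_interp_np(x, out_h=3, out_w=3, align_corners=True)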
class TestBilinearInterpOp(OpTest):
def setUp(self):
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
self.out_size = None
self.actual_shape = None
self.data_layout = 'NCHW'
self.init_test_case()
self.dtype = "float32"
self.op_type = "bilinear_interp_v2"
input_np = np.random.random(self.input_shape).astype(self.dtype)
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
scale_h = 0
scale_w = 0
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
if self.scale > 0.0:
scale_h = scale_w = float(self.scale)
if isinstance(self.scale, list) and len(self.scale) == 1:
scale_w = scale_h = self.scale[0]
elif isinstance(self.scale, list) and len(self.scale) > 1:
scale_w = self.scale[1]
scale_h = self.scale[0]
out_h = int(in_h * scale_h)
out_w = int(in_w * scale_w)
else:
out_h = self.out_h
out_w = self.out_w
output_np = bilinear_interp_np(
input_np,
out_h,
out_w,
0,
0,
self.out_size,
self.actual_shape,
self.align_corners,
self.align_mode,
self.data_layout,
)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.attrs = {
'out_h': self.out_h,
'out_w': self.out_w,
'interp_method': self.interp_method,
'align_corners': self.align_corners,
'align_mode': self.align_mode,
'data_layout': self.data_layout,
}
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
if self.scale > 0.0:
self.scale = [self.scale]
if isinstance(self.scale, list) and len(self.scale) == 1:
self.scale = [self.scale[0], self.scale[0]]
self.attrs['scale'] = self.scale
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 5]
self.out_h = 2
self.out_w = 2
self.scale = 0.0
self.out_size = np.array([3, 3]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase1(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase2(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase3(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-5)
class TestBilinearInterpCase4(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.0
self.out_size = np.array([2, 2]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase5(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.0
self.out_size = np.array([11, 11]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase6(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.out_size = np.array([65, 33]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpCase7(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = [2.0, 0.5]
self.align_corners = False
self.align_mode = 1
class TestBilinearInterpSame(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 0.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpActualShape(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpDataLayout(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 5, 5, 3]
self.out_h = 2
self.out_w = 2
self.scale = 0.0
self.out_size = np.array([3, 3]).astype("int32")
self.align_corners = True
self.align_mode = 1
self.data_layout = "NHWC"
class TestBilinearInterpOtherMethod1(TestBilinearInterpOp):
def set_align_mode(self):
self.align_corners = False
self.align_mode = 1
class TestBilinearInterpWithMethod2(TestBilinearInterpOp):
def set_align_mode(self):
self.align_corners = False
self.align_mode = 0
class TestBilinearInterpWithMethod3(TestBilinearInterpOp):
def set_align_mode(self):
self.align_corners = True
self.align_mode = 0
class TestBilinearInterpScale1(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 2.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpScale2(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 1.0
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpScale3(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 1.5
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpScale4(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = [1.5, 0.5]
self.align_corners = True
self.align_mode = 1
class TestBilinearInterpZero(TestBilinearInterpOp):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 7]
self.out_h = 60
self.out_w = 25
self.scale = 0.2
self.align_corners = False
self.align_mode = 0
class TestBilinearInterpOp_attr_tensor(OpTest):
def setUp(self):
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
        self.out_size = None
        self.actual_shape = None
        # Default the 1-D tensor flags before init_test_case() so that
        # subclasses overriding them in init_test_case() are not reset.
        self.shape_by_1Dtensor = False
        self.scale_by_1Dtensor = False
        self.init_test_case()
        self.op_type = "bilinear_interp_v2"
self.attrs = {
'interp_method': self.interp_method,
'align_corners': self.align_corners,
}
input_np = np.random.random(self.input_shape).astype("float32")
self.inputs = {'X': input_np}
if self.scale_by_1Dtensor:
self.inputs['Scale'] = np.array([self.scale]).astype("float32")
elif self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
if self.scale > 0:
scale_h = scale_w = float(self.scale)
if isinstance(self.scale, list) and len(self.scale) == 1:
scale_w = scale_h = self.scale[0]
elif isinstance(self.scale, list) and len(self.scale) > 1:
scale_w = self.scale[1]
scale_h = self.scale[0]
out_h = int(self.input_shape[2] * scale_h)
out_w = int(self.input_shape[3] * scale_w)
else:
out_h = self.out_h
out_w = self.out_w
if self.shape_by_1Dtensor:
self.inputs['OutSize'] = self.out_size
elif self.out_size is not None:
size_tensor = []
for index, ele in enumerate(self.out_size):
size_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele)
)
self.inputs['SizeTensor'] = size_tensor
self.attrs['out_h'] = self.out_h
self.attrs['out_w'] = self.out_w
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
if self.scale > 0:
self.scale = [self.scale]
if isinstance(self.scale, list) and len(self.scale) == 1:
self.scale = [self.scale[0], self.scale[0]]
self.attrs['scale'] = self.scale
output_np = bilinear_interp_np(
input_np,
out_h,
out_w,
0,
0,
self.out_size,
self.actual_shape,
self.align_corners,
)
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [2, 3, 5, 5]
self.out_h = 3
self.out_w = 3
self.scale = 0.0
self.out_size = [3, 3]
self.align_corners = True
# out_size is fed per-dimension via SizeTensor
class TestBilinearInterp_attr_tensor_Case1(TestBilinearInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.0
self.out_size = [8, 12]
self.align_corners = True
# out_size is a 1-D tensor (fed via the OutSize input)
class TestBilinearInterp_attr_tensor_Case2(TestBilinearInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.0
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
self.shape_by_1Dtensor = True
# scale is a 1-D tensor
class TestBilinearInterp_attr_tensor_Case3(TestBilinearInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'bilinear'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 2.0
self.out_size = None
self.align_corners = True
self.scale_by_1Dtensor = True
class TestBilinearInterpOpAPI_dy(unittest.TestCase):
def test_case(self):
import paddle
if core.is_compiled_with_mlu():
place = paddle.device.MLUPlace(0)
else:
place = core.CPUPlace()
with fluid.dygraph.guard(place):
input_data = np.random.random((2, 3, 6, 6)).astype("float32")
input_x = paddle.to_tensor(input_data)
expect_res = bilinear_interp_np(
input_data, out_h=12, out_w=12, align_corners=False
)
out = interpolate(
x=input_x, size=[12, 12], mode="bilinear", align_corners=False
)
np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
class TestBilinearInterpOpAPI_dy2(unittest.TestCase):
def test_case(self):
import paddle
if core.is_compiled_with_mlu():
place = paddle.device.MLUPlace(0)
else:
place = core.CPUPlace()
with fluid.dygraph.guard(place):
input_data = np.random.random((2, 3, 6, 6)).astype("float32")
size_np = np.array([12, 12]).astype("int64")
input_x = paddle.to_tensor(input_data)
size = paddle.to_tensor(size_np)
expect_res = bilinear_interp_np(
input_data, out_h=12, out_w=12, align_corners=False
)
out = interpolate(
x=input_x, size=size, mode="bilinear", align_corners=False
)
np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
class TestBilinearInterpOpAPI_dy3(unittest.TestCase):
def test_case(self):
import paddle
if core.is_compiled_with_mlu():
place = paddle.device.MLUPlace(0)
else:
place = core.CPUPlace()
with fluid.dygraph.guard(place):
input_data = np.random.random((2, 3, 6, 6)).astype("float32")
size_1 = np.array([12]).astype("int64")
input_x = paddle.to_tensor(input_data)
size = paddle.to_tensor(size_1)
expect_res = bilinear_interp_np(
input_data, out_h=12, out_w=12, align_corners=False
)
out = interpolate(
x=input_x,
size=[size, size],
mode="bilinear",
align_corners=False,
)
np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
class TestBilinearInterpOpAPI_dy4(unittest.TestCase):
def test_case(self):
import paddle
if core.is_compiled_with_mlu():
place = paddle.device.MLUPlace(0)
else:
place = core.CPUPlace()
with fluid.dygraph.guard(place):
input_data = np.random.random((2, 3, 6, 6)).astype("float32")
scale_np = np.array([2, 2]).astype("int64")
input_x = paddle.to_tensor(input_data)
scale = paddle.to_tensor(scale_np)
expect_res = bilinear_interp_np(
input_data, out_h=12, out_w=12, align_corners=False
)
out = interpolate(
x=input_x,
scale_factor=scale,
mode="bilinear",
align_corners=False,
)
np.testing.assert_allclose(out.numpy(), expect_res, rtol=1e-6)
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
paddle.enable_static()
class TestCastOpFp32ToFp16(OpTest):
def setUp(self):
ipt = np.random.random(size=[10, 10])
self.inputs = {'X': ipt.astype('float32')}
self.outputs = {'Out': ipt.astype('float16')}
self.attrs = {
'in_dtype': int(core.VarDesc.VarType.FP32),
'out_dtype': int(core.VarDesc.VarType.FP16),
}
self.op_type = 'cast'
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
self.__class__.no_need_check_grad = True
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
class TestCastOpFp16ToFp32(OpTest):
def setUp(self):
ipt = np.random.random(size=[10, 10])
self.inputs = {'X': ipt.astype('float16')}
self.outputs = {'Out': ipt.astype('float32')}
self.attrs = {
'in_dtype': int(core.VarDesc.VarType.FP16),
'out_dtype': int(core.VarDesc.VarType.FP32),
}
self.op_type = 'cast'
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
self.__class__.no_need_check_grad = True
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
class TestCastOpFp32ToFp64(OpTest):
def setUp(self):
ipt = np.random.random(size=[10, 10])
self.inputs = {'X': ipt.astype('float32')}
self.outputs = {'Out': ipt.astype('float64')}
self.attrs = {
'in_dtype': int(core.VarDesc.VarType.FP32),
'out_dtype': int(core.VarDesc.VarType.FP64),
}
self.op_type = 'cast'
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
self.__class__.no_need_check_grad = True
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
class TestCastOpInt32ToInt32(OpTest):
def setUp(self):
ipt = np.random.randint(1000, size=(10, 10))
self.inputs = {'X': ipt.astype('int32')}
self.outputs = {'Out': ipt.astype('int32')}
self.attrs = {
'in_dtype': int(core.VarDesc.VarType.INT32),
'out_dtype': int(core.VarDesc.VarType.INT32),
}
self.op_type = 'cast'
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
class TestCastOpInt32ToFp32(OpTest):
def setUp(self):
ipt = np.random.randint(1000, size=[10, 10])
self.inputs = {'X': ipt.astype('int32')}
self.outputs = {'Out': ipt.astype('float32')}
self.attrs = {
'in_dtype': int(core.VarDesc.VarType.INT32),
'out_dtype': int(core.VarDesc.VarType.FP32),
}
self.op_type = 'cast'
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
class TestCastOpInt16ToInt64(OpTest):
def setUp(self):
ipt = np.random.randint(1000, size=[10, 10])
self.inputs = {'X': ipt.astype('int16')}
self.outputs = {'Out': ipt.astype('int64')}
self.attrs = {
'in_dtype': int(core.VarDesc.VarType.INT16),
'out_dtype': int(core.VarDesc.VarType.INT64),
}
self.op_type = 'cast'
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
def test_check_output(self):
self.check_output_with_place(self.place, atol=1e-3)
class TestCastOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of cast_op must be Variable.
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.MLUPlace(0)
)
self.assertRaises(TypeError, paddle.cast, x1, 'int32')
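# Illustrative sketch (editorial addition, not part of the original tests): a
# minimal dygraph use of paddle.cast. Kept as an uncalled helper so the
# static-mode tests above are unaffected.
def _cast_dygraph_example():
    paddle.disable_static()
    x = paddle.to_tensor(np.array([1.2, 2.7], dtype='float32'))
    y = paddle.cast(x, 'int32')  # float-to-int cast truncates toward zero: [1, 2]
    paddle.enable_static()
    return y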
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
sys.path.append("..")
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from eager_op_test import OpTest
paddle.enable_static()
class TestClipOp(OpTest):
def setUp(self):
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
self.max_relative_error = 0.006
self.inputs = {}
self.initTestCase()
self.op_type = "clip"
self.attrs = {}
self.attrs['min'] = self.min
self.attrs['max'] = self.max
if 'Min' in self.inputs:
min_v = self.inputs['Min']
else:
min_v = self.attrs['min']
if 'Max' in self.inputs:
max_v = self.inputs['Max']
else:
max_v = self.attrs['max']
input = np.random.random(self.shape).astype(self.dtype)
input[np.abs(input - min_v) < self.max_relative_error] = 0.5
input[np.abs(input - max_v) < self.max_relative_error] = 0.5
self.inputs['X'] = input
self.outputs = {'Out': np.clip(self.inputs['X'], min_v, max_v)}
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad_normal(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
def initTestCase(self):
self.dtype = np.float32
self.shape = (4, 10, 10)
self.max = 0.8
self.min = 0.3
self.inputs['Max'] = np.array([0.8]).astype(self.dtype)
self.inputs['Min'] = np.array([0.1]).astype(self.dtype)
class TestCase1(TestClipOp):
def initTestCase(self):
self.dtype = np.float32
self.shape = (8, 16, 8)
self.max = 0.7
self.min = 0.0
class TestCase2(TestClipOp):
def initTestCase(self):
self.dtype = np.float32
self.shape = (8, 16)
self.max = 1.0
self.min = 0.0
class TestCase3(TestClipOp):
def initTestCase(self):
self.dtype = np.float32
self.shape = (4, 8, 16)
self.max = 0.7
self.min = 0.2
class TestCase4(TestClipOp):
def initTestCase(self):
self.dtype = np.float32
self.shape = (4, 8, 8)
self.max = 0.7
self.min = 0.2
self.inputs['Max'] = np.array([0.8]).astype(self.dtype)
self.inputs['Min'] = np.array([0.3]).astype(self.dtype)
class TestCase5(TestClipOp):
def initTestCase(self):
self.dtype = np.float32
self.shape = (4, 8, 16)
self.max = 0.5
self.min = 0.5
class TestCase6(TestClipOp):
def initTestCase(self):
self.dtype = np.float16
self.shape = (4, 8, 8)
self.max = 0.7
self.min = 0.2
self.inputs['Max'] = np.array([0.8]).astype(self.dtype)
self.inputs['Min'] = np.array([0.3]).astype(self.dtype)
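# Note (editorial addition, not part of the original tests): when the optional
# 'Min'/'Max' tensor inputs are present, setUp() builds the reference output
# from them instead of the 'min'/'max' attributes, matching the operator's
# input-over-attribute precedence. A minimal numpy sketch of that reference:
def _clip_reference_example():
    x = np.array([0.05, 0.5, 0.95], dtype=np.float32)
    return np.clip(x, 0.3, 0.8)  # -> [0.3, 0.5, 0.8]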
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append('..')
from eager_op_test import OpTest
from paddle.fluid import core
import paddle
alignment = 256
paddle.enable_static()
class TestAllocContinuousSpace(OpTest):
def setUp(self):
self.op_type = "coalesce_tensor"
self.dtype, self.fluid_dtype = self.init_dtype()
attrs = self.init_attr()
self.copy_data = attrs["copy_data"]
self.constant = attrs["constant"]
self.set_constant = attrs["set_constant"]
self.Inputs = self.init_input()
self.Outputs, self.FusedOutput = self.init_output(
self.Inputs, self.set_constant, self.constant
)
self.inputs = {'Input': self.Inputs}
self.attrs = attrs
self.outputs = {'Output': self.Outputs, 'FusedOutput': self.FusedOutput}
def init_dtype(self):
return np.float32, int(core.VarDesc.VarType.FP32)
def init_input(self):
inputs = []
inputs.append(("x1", np.random.random([20, 3]).astype(self.dtype)))
inputs.append(("x2", np.random.random([20]).astype(self.dtype)))
inputs.append(("x3", np.random.random([1]).astype(self.dtype)))
inputs.append(("x4", np.random.random([200, 30]).astype(self.dtype)))
inputs.append(("x5", np.random.random([30]).astype(self.dtype)))
inputs.append(("x6", np.random.random([1]).astype(self.dtype)))
return inputs
def init_attr(self):
return {
"copy_data": True,
"set_constant": False,
"constant": 0.0,
"dtype": self.fluid_dtype,
}
def init_output(self, input_list, set_constant, constant):
inputs = []
outputs = input_list
for input in input_list:
length = len(input[1].flatten())
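            # Pad each flattened input up to a multiple of `alignment` elements
            # so every chunk starts at an aligned offset in the fused buffer.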
            aligned_len = (length + alignment) // alignment * alignment
out = np.zeros(int(aligned_len))
out[0:length] = input[1].flatten()
inputs.append(out)
coalesce_tensor_var = np.concatenate([input for input in inputs])
if set_constant:
coalesce_tensor_var = np.ones((len(coalesce_tensor_var))) * constant
outputs = [
(out[0], np.ones(out[1].shape).astype(self.dtype) * constant)
for out in outputs
]
return outputs, coalesce_tensor_var
def test_check_output(self):
self.check_output_with_place(
place=paddle.device.MLUPlace(0),
no_check_set=["FusedOutput"],
atol=1e-5,
)
class TestAllocContinuousSpace2(TestAllocContinuousSpace):
def init_attr(self):
return {
"copy_data": False,
"set_constant": True,
"constant": 5,
"dtype": self.fluid_dtype,
"user_defined_size_of_dtype": 2,
}
def test_check_output(self):
self.check_output_with_place(
place=paddle.device.MLUPlace(0),
no_check_set=["FusedOutput"],
atol=1e-5,
)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
def create_test_class(op_type, typename, callback):
class Cls(OpTest):
def setUp(self):
self.set_mlu()
self.place = paddle.MLUPlace(0)
x = np.random.random(size=(10, 7)).astype(typename)
y = np.random.random(size=(10, 7)).astype(typename)
out = callback(x, y)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': out}
self.op_type = op_type
def set_mlu(self):
self.__class__.use_mlu = True
def test_output(self):
self.check_output_with_place(place=self.place)
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
a = paddle.static.data(name='a', shape=[-1, 2], dtype='float32')
b = paddle.static.data(name='b', shape=[-1, 2], dtype='float32')
c = paddle.static.data(name='c', shape=[-1, 2], dtype='int16')
d = fluid.create_lod_tensor(np.array([[-1]]), [[1]], self.place)
op = eval("fluid.layers.%s" % self.op_type)
self.assertRaises(TypeError, op, x=a, y=b, axis=True)
self.assertRaises(TypeError, op, x=a, y=b, force_cpu=1)
self.assertRaises(TypeError, op, x=a, y=b, cond=1)
self.assertRaises(TypeError, op, x=a, y=c)
self.assertRaises(TypeError, op, x=c, y=a)
self.assertRaises(TypeError, op, x=a, y=d)
self.assertRaises(TypeError, op, x=d, y=a)
self.assertRaises(TypeError, op, x=c, y=d)
def test_dynamic_api(self):
paddle.disable_static()
paddle.set_device('mlu:0')
x = np.random.random(size=(10, 7)).astype(typename)
y = np.random.random(size=(10, 7)).astype(typename)
real_result = callback(x, y)
x = paddle.to_tensor(x, dtype=typename)
y = paddle.to_tensor(y, dtype=typename)
op = eval("paddle.%s" % (self.op_type))
out = op(x, y)
self.assertEqual((out.numpy() == real_result).all(), True)
@unittest.skipIf(typename == 'float16', "float16 is not supported now")
def test_broadcast_api_1(self):
paddle.enable_static()
with program_guard(Program(), Program()):
x = paddle.static.data(
name='x', shape=[1, 2, 1, 3], dtype=typename
)
y = paddle.static.data(
name='y', shape=[1, 2, 3], dtype=typename
)
op = eval("paddle.%s" % (self.op_type))
out = op(x, y)
exe = paddle.static.Executor(self.place)
input_x = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(typename)
input_y = np.arange(0, 6).reshape((1, 2, 3)).astype(typename)
real_result = callback(input_x, input_y)
(res,) = exe.run(
feed={"x": input_x, "y": input_y}, fetch_list=[out]
)
self.assertEqual((res == real_result).all(), True)
@unittest.skipIf(typename == 'float16', "float16 is not supported now")
def test_broadcast_api_2(self):
paddle.enable_static()
with program_guard(Program(), Program()):
x = paddle.static.data(
name='x', shape=[1, 2, 3], dtype=typename
)
y = paddle.static.data(
name='y', shape=[1, 2, 1, 3], dtype=typename
)
op = eval("paddle.%s" % (self.op_type))
out = op(x, y)
exe = paddle.static.Executor(self.place)
input_x = np.arange(0, 6).reshape((1, 2, 3)).astype(typename)
input_y = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(typename)
real_result = callback(input_x, input_y)
(res,) = exe.run(
feed={"x": input_x, "y": input_y}, fetch_list=[out]
)
self.assertEqual((res == real_result).all(), True)
@unittest.skipIf(typename == 'float16', "float16 is not supported now")
def test_broadcast_api_3(self):
paddle.enable_static()
with program_guard(Program(), Program()):
x = paddle.static.data(name='x', shape=[5], dtype=typename)
y = paddle.static.data(name='y', shape=[3, 1], dtype=typename)
op = eval("paddle.%s" % (self.op_type))
out = op(x, y)
exe = paddle.static.Executor(self.place)
input_x = np.arange(0, 5).reshape((5)).astype(typename)
input_y = np.array([5, 3, 2]).reshape((3, 1)).astype(typename)
real_result = callback(input_x, input_y)
(res,) = exe.run(
feed={"x": input_x, "y": input_y}, fetch_list=[out]
)
self.assertEqual((res == real_result).all(), True)
@unittest.skipIf(typename == 'float16', "float16 is not supported now")
def test_attr_name(self):
paddle.enable_static()
with program_guard(Program(), Program()):
x = paddle.static.data(name='x', shape=[-1, 4], dtype=typename)
y = paddle.static.data(name='y', shape=[-1, 4], dtype=typename)
op = eval("paddle.%s" % (self.op_type))
out = op(x=x, y=y, name="name_%s" % (self.op_type))
self.assertEqual("name_%s" % (self.op_type) in out.name, True)
cls_name = "{0}_{1}".format(op_type, typename)
Cls.__name__ = cls_name
globals()[cls_name] = Cls
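# The factory above stamps out one OpTest subclass per (compare op, dtype) pair
# requested below; e.g. create_test_class('equal', 'int32', ...) registers a
# class named "equal_int32" in the module globals so unittest can discover it.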
for _type_name in {'float16', 'float32', 'int32', 'bool'}:
if _type_name == 'int32' or _type_name == 'bool':
create_test_class('equal', _type_name, lambda _a, _b: _a == _b)
continue
create_test_class('equal', _type_name, lambda _a, _b: _a == _b)
create_test_class('not_equal', _type_name, lambda _a, _b: _a != _b)
create_test_class('less_than', _type_name, lambda _a, _b: _a < _b)
create_test_class('less_equal', _type_name, lambda _a, _b: _a <= _b)
create_test_class('greater_than', _type_name, lambda _a, _b: _a > _b)
create_test_class('greater_equal', _type_name, lambda _a, _b: _a >= _b)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import sys
sys.path.append("..")
from eager_op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
paddle.enable_static()
SEED = 2021
class TestConcatOp(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "concat"
self.place = paddle.device.MLUPlace(0)
self.init_dtype()
self.init_test_data()
self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
self.attrs = {'axis': self.axis}
if self.axis < 0:
self.actual_axis = self.axis + len(self.x0.shape)
self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
else:
self.actual_axis = self.axis
self.outputs = {
'Out': np.concatenate(
(self.x0, self.x1, self.x2), axis=self.actual_axis
)
}
def set_mlu(self):
self.__class__.use_mlu = True
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['x0', 'x2'], 'Out')
self.check_grad_with_place(self.place, ['x1'], 'Out')
self.check_grad_with_place(self.place, ['x2'], 'Out')
def init_test_data(self):
self.x0 = np.random.random((1, 4, 50)).astype(self.dtype)
self.x1 = np.random.random((2, 4, 50)).astype(self.dtype)
self.x2 = np.random.random((3, 4, 50)).astype(self.dtype)
self.axis = 0
class TestConcatOp2(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.axis = 1
@skip_check_grad_ci(
reason="The function 'check_grad' for large inputs is too slow."
)
class TestConcatOp3(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
self.axis = 1
def test_check_grad(self):
pass
@skip_check_grad_ci(
reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
)
class TestConcatOp4(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
self.axis = 0
def test_check_grad(self):
pass
class TestConcatOp5(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
self.axis = -3
# ----------------Concat Fp16----------------
def create_test_fp16(parent):
class TestConcatFp16(parent):
def init_dtype(self):
self.dtype = np.float16
cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
TestConcatFp16.__name__ = cls_name
globals()[cls_name] = TestConcatFp16
create_test_fp16(TestConcatOp)
create_test_fp16(TestConcatOp2)
create_test_fp16(TestConcatOp3)
create_test_fp16(TestConcatOp4)
create_test_fp16(TestConcatOp5)
# ----------------Concat Int64----------------
def create_test_int64(parent):
class TestConcatInt64(parent):
def init_dtype(self):
self.dtype = np.int64
def test_check_grad(self):
pass
cls_name = "{0}_{1}".format(parent.__name__, "Int64")
TestConcatInt64.__name__ = cls_name
globals()[cls_name] = TestConcatInt64
create_test_int64(TestConcatOp)
create_test_int64(TestConcatOp2)
create_test_int64(TestConcatOp3)
create_test_int64(TestConcatOp4)
create_test_int64(TestConcatOp5)
# ----------------Concat Int32----------------
def create_test_int32(parent):
class TestConcatInt32(parent):
def init_dtype(self):
self.dtype = np.int32
def test_check_grad(self):
pass
cls_name = "{0}_{1}".format(parent.__name__, "Int32")
TestConcatInt32.__name__ = cls_name
globals()[cls_name] = TestConcatInt32
create_test_int32(TestConcatOp)
create_test_int32(TestConcatOp2)
create_test_int32(TestConcatOp3)
create_test_int32(TestConcatOp4)
create_test_int32(TestConcatOp5)
# ----------------Concat AxisTensor----------------
def create_test_AxisTensor(parent):
class TestConcatAxisTensor(parent):
def setUp(self):
self.op_type = "concat"
self.init_dtype()
self.init_test_data()
self.inputs = {
'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)],
'AxisTensor': np.array([self.axis]).astype("int32"),
}
self.attrs = {}
if self.axis < 0:
self.actual_axis = self.axis + len(self.x0.shape)
self.actual_axis = (
self.actual_axis if self.actual_axis > 0 else 0
)
else:
self.actual_axis = self.axis
self.outputs = {
'Out': np.concatenate(
(self.x0, self.x1, self.x2), axis=self.actual_axis
)
}
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
def init_test_data(self):
self.x0 = np.random.random((1, 4, 50)).astype(self.dtype)
self.x1 = np.random.random((2, 4, 50)).astype(self.dtype)
self.x2 = np.random.random((3, 4, 50)).astype(self.dtype)
self.axis = 0
def init_dtype(self):
self.dtype = np.float32
cls_name = "{0}_{1}".format(parent.__name__, "AxisTensor")
TestConcatAxisTensor.__name__ = cls_name
globals()[cls_name] = TestConcatAxisTensor
create_test_AxisTensor(TestConcatOp)
create_test_AxisTensor(TestConcatOp2)
create_test_AxisTensor(TestConcatOp3)
create_test_AxisTensor(TestConcatOp4)
create_test_AxisTensor(TestConcatOp5)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("..")
import paddle
paddle.enable_static()
import paddle.fluid.core as core
import paddle.fluid as fluid
from eager_op_test import OpTest
from paddle.fluid import Program, program_guard
from test_conv2d_op_mlu import (
TestConv2DOp,
TestConv2DOp_v2,
create_test_padding_SAME_class,
create_test_padding_VALID_class,
create_test_channel_last_class,
create_test_fp16_class,
)
# ----------------TestDepthwiseConv -----
class TestDepthwiseConv(TestConv2DOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2(TestConv2DOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3(TestConv2DOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConvandFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv2andFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv3andFuse(TestConv2DOp):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
class TestDepthwiseConv_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 0, 1]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [0, 1, 0, 2]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 0, 0]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConvandFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.pad = [1, 1]
self.stride = [2, 2]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [2, 1, 2, 3]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv2andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [12, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 1, 1, 2]
self.padding_algorithm = "EXPLICIT"
class TestDepthwiseConv3andFuse_AsyPadding(TestConv2DOp_v2):
def init_test_case(self):
self.fuse_relu_before_depthwise_conv = True
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
self.groups = 3
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [24, f_c, 3, 3]
self.op_type = "depthwise_conv2d"
def init_paddings(self):
self.pad = [1, 2, 0, 2]
self.padding_algorithm = "EXPLICIT"
# depthwise conv2d
create_test_padding_SAME_class(TestDepthwiseConv_AsyPadding)
create_test_padding_SAME_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConv_AsyPadding)
create_test_padding_VALID_class(TestDepthwiseConvandFuse_AsyPadding)
# channel last
create_test_channel_last_class(TestDepthwiseConv_AsyPadding)
create_test_channel_last_class(TestDepthwiseConvandFuse_AsyPadding)
create_test_fp16_class(TestDepthwiseConv_AsyPadding)
create_test_fp16_class(TestDepthwiseConvandFuse_AsyPadding)
# TODO(MLU): Depthwise operation does not support dilation yet;
# it would throw a CNNL_STATUS_NOT_SUPPORTED error.
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle
import sys
sys.path.append("..")
from eager_op_test import OpTest
paddle.enable_static()
class TestFlattenOp(OpTest):
def setUp(self):
self.place = paddle.device.MLUPlace(0)
self.__class__.use_mlu = True
self.op_type = "flatten2"
self.init_test_case()
self.inputs = {"X": np.random.random(self.in_shape).astype("float64")}
self.init_attrs()
self.outputs = {
"Out": self.inputs["X"].reshape(self.new_shape),
"XShape": np.random.random(self.in_shape).astype("float32"),
}
def test_check_output(self):
self.check_output_with_place(self.place, no_check_set=["XShape"])
def test_check_grad(self):
self.check_grad_with_place(self.place, ["X"], "Out")
def init_test_case(self):
self.in_shape = (3, 2, 4, 5)
self.axis = 1
self.new_shape = (3, 40)
def init_attrs(self):
self.attrs = {"axis": self.axis}
class TestFlattenOp1(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 5, 4)
self.axis = 0
self.new_shape = (1, 120)
class TestFlattenOpWithDefaultAxis(TestFlattenOp):
def init_test_case(self):
self.in_shape = (10, 2, 2, 3)
self.new_shape = (10, 12)
def init_attrs(self):
self.attrs = {}
class TestFlattenOpSixDims(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 3, 2, 4, 4)
self.axis = 4
self.new_shape = (36, 16)
if __name__ == "__main__":
unittest.main()