Unverified commit 90ee3666, authored by Steffy-zxf, committed by GitHub

Update ops' unit test data type from float32 to float64 and input shapes to over 100 elements (#22544)

* Update the unit tests of elementwise_pow, elementwise_max, elementwise_min, scale, and sqrt:

1. Switch the input data type in the elementwise_pow, elementwise_max, and scale unit tests from float32 to float64.
2. Fix a bug where elementwise_pow did not meet the gradient-check threshold requirements when handling float64 data.
3. Remove sqrt from op_accuracy_white_list.py.
4. Update the elementwise_pow, elementwise_max, and elementwise_min unit tests so that their input shapes exceed 100 elements.
5. test=develop

* Revise the wording per review suggestions.
test=develop
Parent 57de4842
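Why float64: the numeric gradient that check_grad compares against is a central difference, and in float32 its round-off floor is large enough to crowd a tight relative-error threshold. A minimal NumPy sketch of the effect (the delta and the [20, 5] shape mirror the new base pow test; op_test's actual delta and thresholds differ):

```python
import numpy as np

def pow_grad_rel_err(dtype, delta=1e-3):
    # Max relative error of a central-difference estimate of d(x**y)/dx
    # against the analytic gradient y * x**(y - 1).
    rng = np.random.RandomState(0)
    x = rng.uniform(1, 2, [20, 5]).astype(dtype)
    y = rng.uniform(1, 2, [20, 5]).astype(dtype)
    numeric = ((x + delta) ** y - (x - delta) ** y) / (2 * delta)
    analytic = y * x ** (y - 1)
    return np.abs(numeric - analytic).max() / np.abs(analytic).max()

print(pow_grad_rel_err(np.float32))  # round-off dominates: noticeably larger
print(pow_grad_rel_err(np.float64))  # several orders of magnitude smaller
```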
@@ -20,12 +20,13 @@ limitations under the License. */
 
 namespace paddle {
 namespace operators {
-static inline float GetAttrFromTensor(const framework::Tensor* tensor) {
-  const float* tensor_data = tensor->data<float>();
+template <typename T>
+static inline T GetAttrFromTensor(const framework::Tensor* tensor) {
+  const auto* tensor_data = tensor->data<T>();
   framework::Tensor cpu_tensor;
   if (platform::is_gpu_place(tensor->place())) {
     TensorCopySync(*tensor, platform::CPUPlace(), &cpu_tensor);
-    tensor_data = cpu_tensor.data<float>();
+    tensor_data = cpu_tensor.data<T>();
   }
   return tensor_data[0];
 }
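The templated helper matters because ScaleKernel is instantiated for several T: a float64 ScaleTensor read through the old data<float>() is a type mismatch, which surfaces as a runtime type-check failure (or, absent such a check, a raw byte reinterpretation). A loose NumPy analogy, where view() plays the role of the mismatched read and astype() the intended conversion:

```python
import numpy as np

scale = np.array([-2.3], dtype=np.float64)
print(scale.astype(np.float32)[0])  # -2.3: an actual value conversion
print(scale.view(np.float32))       # nonsense: float64 bytes reread as float32
```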
@@ -43,7 +44,7 @@ class ScaleKernel : public framework::OpKernel<T> {
     auto scale = static_cast<T>(ctx.Attr<float>("scale"));
     if (ctx.HasInput("ScaleTensor")) {
       auto* scale_tensor = ctx.Input<framework::Tensor>("ScaleTensor");
-      scale = GetAttrFromTensor(scale_tensor);
+      scale = GetAttrFromTensor<T>(scale_tensor);
     }
     auto* out_var = ctx.OutputVar("Out");
......
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 
 
 class TestElementwiseOp(OpTest):
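skip_check_grad_ci, newly imported here, is the op_test decorator that exempts a test class from the CI audit requiring gradient checks (needed below, where Y is a 1-element tensor used purely to exercise broadcasting). A sketch of its likely shape, assuming a no_need_check_grad flag is what the audit inspects; the real definition in op_test.py may differ:

```python
def skip_check_grad_ci(reason=None):
    """Mark an OpTest subclass as exempt from the CI gradient-check audit."""
    if not isinstance(reason, str):
        raise AssertionError("A string reason for skipping check_grad is required.")

    def wrapper(cls):
        cls.no_need_check_grad = True  # flag read by the CI-side audit (assumed)
        return cls

    return wrapper
```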
@@ -25,9 +25,9 @@ class TestElementwiseOp(OpTest):
         # If x and y have the same value, the max() is not differentiable.
         # So we generate test data by the following method
         # to avoid them being too close to each other.
-        x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
-        sgn = np.random.choice([-1, 1], [13, 17]).astype("float32")
-        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32")
+        x = np.random.uniform(0.1, 1, [13, 17]).astype("float64")
+        sgn = np.random.choice([-1, 1], [13, 17]).astype("float64")
+        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
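The comment in this hunk is the crux for max/min: at a tie x == y, max has no unique derivative, so a finite-difference probe straddling the tie averages the two one-sided slopes and disagrees with whichever subgradient the kernel returns. A quick NumPy illustration:

```python
import numpy as np

x, y, delta = 1.0, 1.0, 1e-3  # x == y: max is not differentiable here
numeric = (np.maximum(x + delta, y) - np.maximum(x - delta, y)) / (2 * delta)
print(numeric)  # 0.5, while an analytic kernel reports 0 or 1 at a tie
# Adding sgn * uniform(0.1, 1) keeps |x - y| >= 0.1, safely away from ties.
```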
@@ -46,11 +46,13 @@ class TestElementwiseOp(OpTest):
             ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
 
 
+@skip_check_grad_ci(
+    reason="[skip shape check] Use y_shape(1) to test broadcast.")
 class TestElementwiseMaxOp_scalar(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
-        y = np.array([0.5]).astype("float32")
+        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float64")
+        y = np.array([0.5]).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
@@ -58,9 +60,9 @@ class TestElementwiseMaxOp_scalar(TestElementwiseOp):
 class TestElementwiseMaxOp_Vector(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.random((100, )).astype("float32")
-        sgn = np.random.choice([-1, 1], (100, )).astype("float32")
-        y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32")
+        x = np.random.random((100, )).astype("float64")
+        sgn = np.random.choice([-1, 1], (100, )).astype("float64")
+        y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float64")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
@@ -68,73 +70,73 @@ class TestElementwiseMaxOp_Vector(TestElementwiseOp):
 class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.uniform(0.5, 1, (2, 3, 20)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (2, )).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
         y = x[:, 0, 0] + sgn * \
-            np.random.uniform(1, 2, (2, )).astype(np.float32)
+            np.random.uniform(1, 2, (100, )).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
 
         self.attrs = {'axis': 0}
         self.outputs = {
             'Out':
-            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(2, 1, 1))
+            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
         }
 
 
 class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.uniform(0.5, 1, (2, 3, 20)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (3, )).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
         y = x[0, :, 0] + sgn * \
-            np.random.uniform(1, 2, (3, )).astype(np.float32)
+            np.random.uniform(1, 2, (100, )).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
 
         self.attrs = {'axis': 1}
         self.outputs = {
             'Out':
-            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 1))
+            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
         }
 
 
 class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.uniform(0.5, 1, (10, 3, 4)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (4, )).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
         y = x[0, 0, :] + sgn * \
-            np.random.uniform(1, 2, (4, )).astype(np.float32)
+            np.random.uniform(1, 2, (100, )).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
 
         self.outputs = {
             'Out':
-            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 4))
+            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
         }
 
 
 class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (3, 4)).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float64)
         y = x[0, :, :, 0] + sgn * \
-            np.random.uniform(1, 2, (3, 4)).astype(np.float32)
+            np.random.uniform(1, 2, (50, 2)).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
 
         self.attrs = {'axis': 1}
         self.outputs = {
             'Out':
-            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 4, 1))
+            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1))
         }
 
 
 class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
-        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float64)
         y = x + sgn * \
-            np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32)
+            np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
......
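Every broadcast case above follows one pattern: with attrs {'axis': k}, Y's dimensions are aligned with X's starting at index k, which the expected outputs encode as an explicit reshape padded with 1s. The same semantics in plain NumPy, using the shapes from broadcast_1:

```python
import numpy as np

x = np.random.uniform(0.5, 1, (2, 100, 3))
y = np.random.uniform(1, 2, (100,))

# axis=1 aligns y with x's dim 1, i.e. broadcasts it as shape (1, 100, 1).
out = np.maximum(x, y.reshape(1, 100, 1))
assert out.shape == x.shape
```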
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 
 
 class TestElementwiseOp(OpTest):
@@ -46,6 +46,8 @@ class TestElementwiseOp(OpTest):
             ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
 
 
+@skip_check_grad_ci(
+    reason="[skip shape check] Use y_shape(1) to test broadcast.")
 class TestElementwiseMinOp_scalar(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
@@ -68,73 +70,73 @@ class TestElementwiseMinOp_Vector(TestElementwiseOp):
 class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (2, )).astype(np.float64)
+        x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
         y = x[:, 0, 0] + sgn * \
-            np.random.uniform(1, 2, (2, )).astype(np.float64)
+            np.random.uniform(1, 2, (100, )).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
 
         self.attrs = {'axis': 0}
         self.outputs = {
             'Out':
-            np.minimum(self.inputs['X'], self.inputs['Y'].reshape(2, 1, 1))
+            np.minimum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
         }
 
 
 class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (3, )).astype(np.float64)
+        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
         y = x[0, :, 0] + sgn * \
-            np.random.uniform(1, 2, (3, )).astype(np.float64)
+            np.random.uniform(1, 2, (100, )).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
 
         self.attrs = {'axis': 1}
         self.outputs = {
             'Out':
-            np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 1))
+            np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
         }
 
 
 class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (4, )).astype(np.float64)
+        x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (100, )).astype(np.float64)
         y = x[0, 0, :] + sgn * \
-            np.random.uniform(1, 2, (4, )).astype(np.float64)
+            np.random.uniform(1, 2, (100, )).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
 
         self.outputs = {
             'Out':
-            np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 4))
+            np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
         }
 
 
 class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (3, 4)).astype(np.float64)
+        x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float64)
        y = x[0, :, :, 0] + sgn * \
-            np.random.uniform(1, 2, (3, 4)).astype(np.float64)
+            np.random.uniform(1, 2, (25, 4)).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
 
         self.attrs = {'axis': 1}
         self.outputs = {
             'Out':
-            np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 4, 1))
+            np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1))
         }
 
 
 class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_min"
-        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float64)
-        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float64)
+        x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float64)
+        sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float64)
         y = x + sgn * \
-            np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float64)
+            np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float64)
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
......
@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 import unittest
 import numpy as np
-from op_test import OpTest
+from op_test import OpTest, skip_check_grad_ci
 import paddle.fluid as fluid
@@ -23,8 +23,8 @@ class TestElementwisePowOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [2, 3]).astype("float64")
+            'X': np.random.uniform(1, 2, [20, 5]).astype("float64"),
+            'Y': np.random.uniform(1, 2, [20, 5]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
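For reference, the gradients these tests exercise are dz/dx = y * x**(y - 1) and dz/dy = x**y * ln(x); bases in (1, 2) keep both well scaled, and the big_shape_2 change below similarly trades uniform(0.1, 1) * 20 exponents for uniform(0.2, 2), avoiding outputs like 0.1**20 that underflow toward zero. A NumPy cross-check of the analytic gradients against central differences (delta is illustrative):

```python
import numpy as np

rng = np.random.RandomState(0)
x = rng.uniform(1, 2, [20, 5])
y = rng.uniform(1, 2, [20, 5])
delta = 1e-5

dz_dx = y * x ** (y - 1)    # analytic gradient w.r.t. X
dz_dy = x ** y * np.log(x)  # analytic gradient w.r.t. Y

num_dx = ((x + delta) ** y - (x - delta) ** y) / (2 * delta)
num_dy = (x ** (y + delta) - x ** (y - delta)) / (2 * delta)

assert np.allclose(dz_dx, num_dx, rtol=1e-4)
assert np.allclose(dz_dy, num_dy, rtol=1e-4)
```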
@@ -39,7 +39,7 @@ class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [10, 10]).astype("float64"),
+            'X': np.random.uniform(1, 2, [10, 10]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [10, 10]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@@ -49,12 +49,14 @@ class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [10, 10]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [10, 10]).astype("float64") * 20
+            'X': np.random.uniform(1, 2, [10, 10]).astype("float64"),
+            'Y': np.random.uniform(0.2, 2, [10, 10]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
 
 
+@skip_check_grad_ci(
+    reason="[skip shape check] Use y_shape(1) to test broadcast.")
 class TestElementwisePowOp_scalar(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
@@ -69,8 +71,8 @@ class TestElementwisePowOp_tensor(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [32]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [32]).astype("float64")
+            'X': np.random.uniform(0.1, 1, [100]).astype("float64"),
+            'Y': np.random.uniform(1, 3, [100]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@@ -79,8 +81,8 @@ class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [4]).astype("float64")
+            'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [100]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@@ -89,12 +91,12 @@ class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [3]).astype("float64")
+            'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [100]).astype("float64")
         }
         self.attrs = {'axis': 1}
         self.outputs = {
-            'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(3, 1))
+            'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))
         }
@@ -102,12 +104,13 @@ class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [2]).astype("float64")
+            'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [100]).astype("float64")
         }
         self.attrs = {'axis': 0}
         self.outputs = {
-            'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(2, 1, 1))
+            'Out':
+            np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
         }
@@ -115,12 +118,12 @@ class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [3, 4]).astype("float64")
+            'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float64")
         }
         self.attrs = {'axis': 1}
         self.outputs = {
-            'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 4,
+            'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5,
                                                                        1))
         }
@@ -129,8 +132,8 @@ class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"),
-            'Y': np.random.uniform(0.1, 1, [2, 3, 1, 5]).astype("float64")
+            'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype("float64"),
+            'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype("float64")
         }
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
......
@@ -24,7 +24,7 @@ from paddle.fluid.op import Operator
 class TestScaleOp(OpTest):
     def setUp(self):
         self.op_type = "scale"
-        self.dtype = np.float32
+        self.dtype = np.float64
         self.init_dtype_type()
         self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
         self.attrs = {'scale': -2.3}
@@ -45,12 +45,12 @@ class TestScaleOp(OpTest):
 class TestScaleOpScaleVariable(OpTest):
     def setUp(self):
         self.op_type = "scale"
-        self.dtype = np.float32
+        self.dtype = np.float64
         self.init_dtype_type()
         self.scale = -2.3
         self.inputs = {
             'X': np.random.random((10, 10)).astype(self.dtype),
-            'ScaleTensor': np.array([self.scale]).astype('float32')
+            'ScaleTensor': np.array([self.scale]).astype('float64')
         }
         self.attrs = {}
         self.outputs = {'Out': self.inputs['X'] * self.dtype(self.scale)}
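The expected output multiplies by self.dtype(self.scale), the Python scalar cast to the tensor dtype, and ScaleTensor now carries float64 to match: -2.3 is not exactly representable in float32, so a float32 round-trip would leave a small but detectable residue against a float64 reference. A quick check:

```python
import numpy as np

x32, x64 = np.float32(-2.3), np.float64(-2.3)
print(np.float64(x32) - x64)  # ~4.77e-08: float32 only approximates -2.3
```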
@@ -72,7 +72,7 @@ class TestScaleOpSelectedRows(unittest.TestCase):
     def check_with_place(self, place, in_name, out_name):
         scope = core.Scope()
 
-        self.dtype = np.float32
+        self.dtype = np.float64
         self.init_dtype_type()
 
         # create and initialize Grad Variable
......
@@ -15,9 +15,6 @@
 NEED_TO_FIX_OP_LIST = [
     'elementwise_mul',
     'elementwise_div',
-    'elementwise_max',
-    'elementwise_min',
-    'elementwise_pow',
     'fused_elemwise_activation',
     'bilinear_tensor_product',
     'conv2d_transpose',
......
@@ -31,7 +31,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'depthwise_conv2d', \
     'depthwise_conv2d_transpose', \
     'dropout', \
-    'elementwise_max', \
     'fused_elemwise_activation', \
     'hierarchical_sigmoid', \
     'hinge_loss', \
@@ -59,7 +58,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'reshape2', \
     'roi_perspective_transform', \
     'row_conv', \
-    'scale', \
     'scatter', \
     'sequence_conv', \
     'sequence_pool', \
@@ -71,7 +69,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
     'smooth_l1_loss', \
     'softmax', \
     'spectral_norm', \
-    'sqrt', \
     'squared_l2_distance', \
     'squared_l2_norm', \
     'tanh', \
......
@@ -20,7 +20,6 @@ NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST = [
     'conv3d', \
     'cross_entropy', \
     'depthwise_conv2d_transpose', \
-    'elementwise_pow', \
     'grid_sampler', \
     'group_norm', \
     'gru', \
......